| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 54.1k) | int64 (0 to 699) | string (lengths 111 to 35.6k) | int64 (0 to 699) | int64 (0 or 1) |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
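A minimal usage sketch of the lazy-import pattern above (assumes `transformers` and `torch` are installed; the attribute lookup is what triggers the deferred submodule import):

```python
# Importing the package is cheap: only the configuration submodule is loaded
# for MobileBertConfig, and modeling_mobilebert loads on first access to the
# model class through the lazy module.
from transformers import MobileBertConfig, MobileBertModel

config = MobileBertConfig()
model = MobileBertModel(config)  # randomly initialized weights
print(type(model).__name__)
```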
| code_codestyle: 22 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # token ids as int32, labels as int64
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
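A hedged example of calling `get_tfds` (the CSV name and layout are hypothetical; requires `datasets`, `transformers`, and TensorFlow):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train.csv is a hypothetical file whose column 0 holds the label and whose
# remaining column holds the text to classify.
train_ds, val_ds, test_ds, label2id = get_tfds(
    train_file="train.csv",
    eval_file=None,
    test_file=None,
    tokenizer=tokenizer,
    label_column_id=0,
    max_seq_length=128,
)
print(label2id)         # e.g. {"neg": 0, "pos": 1}
print(val_ds, test_ds)  # None, None: no eval/test files were given
```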
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
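One hedged way to drive the script programmatically (file names and hyperparameters are placeholders, not part of the original example):

```python
import sys

# Equivalent to invoking the script from a shell; HfArgumentParser reads sys.argv.
sys.argv = [
    "run_tf_text_classification.py",
    "--model_name_or_path", "bert-base-uncased",
    "--train_file", "train.csv",      # placeholder CSV
    "--dev_file", "dev.csv",          # placeholder CSV
    "--label_column_id", "0",
    "--output_dir", "./text_clf_out",
    "--do_train",
    "--do_eval",
]
main()
```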
| style_context_codestyle: 22 | label: 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = int((config.image_size // config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
_a = copy.deepcopy(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = model(**lowerCAmelCase_ , noise=lowerCAmelCase_ )
_a = outputs_dict[0].numpy()
_a = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = int((config.image_size // config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCAmelCase_ : List[str] ):
_a = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCAmelCase_ ):
_a = v.numpy()
else:
_a = np.array(lowerCAmelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = prepare_numpy_arrays(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
_a = model(**lowerCAmelCase_ , noise=lowerCAmelCase_ )
self.assert_outputs_same(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
_a = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_a = tf.constant(lowerCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_a = tf_noise
super().check_pt_tf_models(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
np.random.seed(2 )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCAmelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCAmelCase_ , lowerCAmelCase_ ),)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCAmelCase_ , '''_keras_serializable''' , lowerCAmelCase_ )
}
_a = int((config.image_size // config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_a = tf.convert_to_tensor(lowerCAmelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_a = main_layer_class(lowerCAmelCase_ )
_a = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_a = tf.keras.Model(lowerCAmelCase_ , outputs=main_layer(lowerCAmelCase_ ) )
_a = model(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''keras_model.h5''' )
model.save(lowerCAmelCase_ )
_a = tf.keras.models.load_model(
lowerCAmelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCAmelCase_ , tf.keras.Model )
_a = model(lowerCAmelCase_ )
self.assert_outputs_same(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
np.random.seed(2 )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = int((config.image_size // config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
_a = outputs.last_hidden_state.numpy()
_a = 0
else:
_a = outputs.logits.numpy()
_a = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase_ , saved_model=lowerCAmelCase_ )
_a = model_class.from_pretrained(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
_a = after_outputs['''last_hidden_state'''].numpy()
_a = 0
else:
_a = after_outputs['''logits'''].numpy()
_a = 0
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase_ , 1e-5 )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
np.random.seed(2 )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = int((config.image_size // config.patch_size) ** 2 )
_a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
_a = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCAmelCase_ )
_a = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_a = model_class.from_config(model.config )
_a = new_model(lowerCAmelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
_a = new_model(lowerCAmelCase_ , noise=lowerCAmelCase_ )
self.assert_outputs_same(lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
@slow
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_a = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case_ ():
'''simple docstring'''
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
np.random.seed(2 )
_a = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=lowerCAmelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_a = ViTMAEConfig()
_a = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_a = np.random.uniform(size=(1, num_patches) )
# forward pass
_a = model(**lowerCAmelCase_ , noise=lowerCAmelCase_ )
# verify the logits
_a = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_a = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
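A worked check of the sequence-length arithmetic the tester above relies on, using its default hyperparameters:

```python
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 15 ** 2 = 225
# ViTMAE keeps (num_patches + 1) * (1 - mask_ratio) tokens, rounded up
# (the +1 accounts for the [CLS] token).
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
print(num_patches, seq_length)  # 225 91
```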
| code_codestyle: 22 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
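A short sketch of the global-attention behaviour the second-to-last test exercises (downloads the tokenizer, so it needs network access):

```python
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["Summary of the text.", "Another summary."])
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tokenizer.pad(enc)
# Padded positions of global_attention_mask are filled with -1, matching the
# expectation asserted in the test above.
print(padded["global_attention_mask"])
```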
| style_context_codestyle: 22 | label: 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
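A few quick asserts showing how the wildcard rules above match (the key names are made up for illustration):

```python
assert should_ignore("encoder.proj.weight", ["encoder.proj"])                   # plain substring
assert should_ignore("text_encoder_prenet.foo.bar", ["text_encoder_prenet.*"])  # trailing wildcard
assert should_ignore(
    "encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"]
)                                                                               # infix wildcard
assert not should_ignore("decoder.layers.0.fc1.weight", ["encoder.proj"])
```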
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_snake_case : int = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
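A hedged invocation sketch (all paths are placeholders; the checkpoint and SentencePiece model must come from the fairseq SpeechT5 release):

```python
# Convert an ASR ("s2t") checkpoint without going through argparse.
convert_speecht5_checkpoint(
    task="s2t",
    checkpoint_path="speecht5_asr.pt",          # placeholder fairseq checkpoint
    pytorch_dump_folder_path="./speecht5_s2t",  # placeholder output folder
    vocab_path="spm_char.model",                # placeholder SentencePiece model
)
```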
| code_codestyle: 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
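The round-trip the first test checks, in isolation (it uses the same private helpers as the test, so it may break across `datasets` versions):

```python
from datasets.splits import SplitDict, SplitInfo

sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")})
yaml_list = sd._to_yaml_list()  # dataset_name is dropped from the YAML dump
reloaded = SplitDict._from_yaml_list(yaml_list)
print(yaml_list)
print(reloaded["train"].dataset_name)  # None after the round trip
```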
| style_context_codestyle: 22 | label: 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
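A hedged usage sketch of the processor (downloads the standard BLIP captioning checkpoint, so it needs network access):

```python
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
# pixel_values from the image processor plus input_ids/attention_mask from the
# tokenizer (token_type_ids are disabled in __init__ above).
print(sorted(inputs.keys()))
```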
| code_codestyle: 22 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
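An illustration of the `# Copied from` marker that `check_copies` enforces (the target class name is made up; a real copy would live inside the diffusers source tree):

```python
from dataclasses import dataclass
from typing import Optional

import torch
from diffusers.utils import BaseOutput


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->My
class MySchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
```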
| style_context_codestyle: 22 | label: 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(100 ).repartition(1 )
_a = Spark(UpperCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
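    # Worked out: 100 rows * 8 bytes/row = 800 bytes, and 800 / 16 bytes per shard = 50.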
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(10 ).repartition(2 )
_a = [1, 0]
_a = _generate_iterable_examples(UpperCamelCase , UpperCamelCase ) # Reverse the partitions.
_a = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase , UpperCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_a , _a = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(10 ).repartition(1 )
_a = SparkExamplesIterable(UpperCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(UpperCamelCase ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        _a = lambda x : x.reverse()
_a = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase , [2, 1, 0] )
_a = SparkExamplesIterable(UpperCamelCase ).shuffle_data_sources(UpperCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(UpperCamelCase ):
_a , _a = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_a = SparkExamplesIterable(UpperCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_a = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(UpperCamelCase ):
_a , _a = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_a = SparkExamplesIterable(UpperCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_a = _get_expected_row_ids_and_row_dicts_for_partition_order(UpperCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(UpperCamelCase ):
_a , _a = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ():
'''simple docstring'''
_a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_a = spark.range(100 ).repartition(1 )
_a = Spark(UpperCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Union[str, Any] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = SqueezeBertTokenizer
def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
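    # SQuAD-style answer normalization: lowercase, strip punctuation and the articles
    # a/an/the, and collapse whitespace before strings are compared.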
def remove_articles(UpperCamelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : str ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
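    # Exact-match score: a prediction counts if it equals any of its references after
    # normalization; the mean over all predictions is returned as a percentage.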
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
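    # Average each of the keep / deletion / addition scores over 1- to 4-grams; the final
    # SARI score is the mean of those three averages.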
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
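    # Tokenize with sacrebleu ('13a'/'intl'), sacremoses ('moses'/'penn'), or leave the
    # sentence as-is; optionally lowercase first and return a string or a token list.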
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
_a = nn.Parameter(UpperCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
_a = nn.Parameter(UpperCamelCase )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
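    # LSH self-attention uses one shared query/key projection plus value and output
    # projections; each trax tensor is transposed and flattened to the torch Linear layout.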
_a = np.asarray(weights[0] )
_a = np.asarray(weights[1] )
_a = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase ).view(-1 , UpperCamelCase ).contiguous().transpose(0 , 1 ) , )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
_a = np.asarray(weights[0] )
_a = np.asarray(weights[1] )
_a = np.asarray(weights[2] )
_a = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase ).view(-1 , UpperCamelCase ).contiguous().transpose(0 , 1 ) , )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : int ):
'''simple docstring'''
_a = weights[0][0][0]
_a = np.asarray(layer_norm_a[0] )
_a = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase ) , torch.tensor(UpperCamelCase ) , )
# lsh weights + output
_a = weights[0][1]
if len(UpperCamelCase ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase , torch_block.attention , UpperCamelCase )
else:
set_layer_weights_in_torch_local(UpperCamelCase , torch_block.attention , UpperCamelCase )
# intermediate weighs
_a = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase ) == 4:
_a = intermediate_weights[2]
# layernorm 2
_a = np.asarray(intermediate_weights[0][0] )
_a = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase ) , torch.tensor(UpperCamelCase ) , )
# intermediate dense
_a = np.asarray(intermediate_weights[1][0] )
_a = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase ) , )
# intermediate out
_a = np.asarray(intermediate_weights[4][0] )
_a = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase ) , )
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = torch_model.reformer
# word embeds
_a = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase ) , )
if isinstance(weights[3] , UpperCamelCase ):
_a = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings.weights[emb_idx]} emb does not match'
_a = nn.Parameter(torch.tensor(UpperCamelCase ) )
_a = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# output layer norm
_a = np.asarray(weights[7][0] )
_a = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase ) , torch.tensor(UpperCamelCase ) , )
# output embeddings
_a = np.asarray(weights[9][0] )
_a = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase ) , )
def snake_case_ (UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
_a = ReformerConfig.from_json_file(UpperCamelCase )
print(f'Building PyTorch model from configuration: {config}' )
_a = ReformerModelWithLMHead(UpperCamelCase )
with open(UpperCamelCase , '''rb''' ) as f:
_a = pickle.load(UpperCamelCase )['''weights''']
set_model_weights_in_torch(UpperCamelCase , UpperCamelCase , config.hidden_size )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : Tuple = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A ( _a ):
lowercase_ = 'sew-d'
def __init__( self : Tuple , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : str=7_68 , lowerCAmelCase_ : List[str]=12 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Tuple=30_72 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Dict=5_12 , lowerCAmelCase_ : List[str]=2_56 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any=("p2c", "c2p") , lowerCAmelCase_ : Dict="layer_norm" , lowerCAmelCase_ : List[Any]="gelu_python" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : Any=1e-7 , lowerCAmelCase_ : Any=1e-5 , lowerCAmelCase_ : Tuple="group" , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Optional[int]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase_ : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Tuple=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Union[str, Any]=1_28 , lowerCAmelCase_ : Union[str, Any]=16 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Any=0.0_5 , lowerCAmelCase_ : Optional[int]=10 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : Tuple=10 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : Tuple="mean" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Tuple=2_56 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Any=2 , **lowerCAmelCase_ : str , ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(lowerCAmelCase_ )
_a = list(lowerCAmelCase_ )
_a = list(lowerCAmelCase_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = squeeze_factor
_a = max_position_embeddings
_a = position_buckets
_a = share_att_key
_a = relative_attention
_a = norm_rel_ebd
_a = list(lowerCAmelCase_ )
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layer_norm_eps
_a = feature_layer_norm_eps
_a = initializer_range
_a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# sequence classification
_a = use_weighted_layer_sum
_a = classifier_proj_size
@property
def __lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 22 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _a ):
lowercase_ = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
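        # Flatten the three stacked samples into one batch, predict their noise residuals,
        # and denoise them with a single batched scheduler step.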
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A :
def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[Any]="resnet50" , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]=True , ) -> Union[str, Any]:
"""simple docstring"""
_a = parent
_a = out_indices if out_indices is not None else [4]
_a = stage_names
_a = out_features
_a = backbone
_a = batch_size
_a = image_size
_a = num_channels
_a = use_pretrained_backbone
_a = is_training
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = TimmBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(lowerCAmelCase_ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a = config_and_inputs
_a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A ( _a ,_a ,_a ,unittest.TestCase ):
lowercase_ = (TimmBackbone,) if is_torch_available() else ()
lowercase_ = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_a = TimmBackboneModelTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_a = '''resnet18'''
_a = '''microsoft/resnet-18'''
_a = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ )
_a = AutoBackbone.from_pretrained(lowerCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_a = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ , out_indices=[1, 2, 3] )
_a = AutoBackbone.from_pretrained(lowerCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = self.has_attentions
# no need to test all models as different heads yield the same functionality
_a = self.all_model_classes[0]
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = model(**lowerCAmelCase_ )
_a = outputs[0][-1]
# Encoder-/Decoder-only models
_a = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_a = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_a = copy.deepcopy(lowerCAmelCase_ )
_a = None
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_a = copy.deepcopy(lowerCAmelCase_ )
_a = False
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(**lowerCAmelCase_ )
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    # California house price dataset is used to demonstrate the algorithm
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
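# Added usage sketch (not part of the original script): XGBRegressor also
# exposes per-feature importances after fitting; this helper prints them next
# to the dataset's feature names. It only relies on the sklearn Bunch API and
# the functions defined above.
def feature_importances() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    model = XGBRegressor(verbosity=0, random_state=42)
    model.fit(data, target)
    for name, score in zip(california["feature_names"], model.feature_importances_):
        print(f"{name}: {score:.4f}")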
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers that can still be completed from this state."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
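# Added sanity check (not in the original solution): Project Euler 145 states
# there are exactly 120 reversible numbers below one thousand, i.e. for
# lengths 1 through 3.
assert solution(3) == 120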
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
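# Added truth-table sketch (not part of the original file): for basis-state
# inputs the circuit is purely classical, so all 1000 shots land on a single
# key "<carry><sum>" -- qiskit prints the most significant classical bit first.
def demo_half_adder_truth_table() -> None:
    for bit0 in (0, 1):
        for bit1 in (0, 1):
            expected = f"{bit0 & bit1}{bit0 ^ bit1}"
            assert half_adder(bit0, bit1) == {expected: 1000}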
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
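# Added sanity check (not in the original file): the example graph is
# connected, so a DFS from any vertex must visit every vertex.
assert depth_first_search(G, "A") == set(G)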
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big endian to little endian."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit non-negative integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit non-negative integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of the message as UTF-8 encoded hex bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
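# Added cross-check sketch (not part of the original file): verify md5_me
# against hashlib on a few inputs. This assumes, as in the code above, that
# the digest is returned as UTF-8 encoded hex bytes.
def demo_md5_against_hashlib() -> None:
    import hashlib

    for message in (b"", b"hello", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")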
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 22 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    # Two fixture images are enough to exercise batched and non-batched paths
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 22 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 22 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
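# Added illustration (not part of the original test): shift_tokens_right moves
# every label one slot to the right, puts decoder_start_token_id in slot 0,
# and replaces any remaining -100 padding with pad_token_id.
def demo_shift_tokens_right() -> None:
    import numpy as np

    labels = np.array([[5, 6, 7, -100]])
    shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
    assert shifted.tolist() == [[0, 5, 6, 7]]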
| 22 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 22 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
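# Added round-trip sketch (not part of the original pipeline file):
# bits_to_decimal inverts decimal_to_bits up to the 8-bit quantisation step.
def demo_bit_round_trip() -> None:
    x = torch.rand(2, 3, 8, 8)
    bits = decimal_to_bits(x)
    assert bits.shape == (2, 3 * BITS, 8, 8)
    recovered = bits_to_decimal(bits)
    assert torch.allclose(recovered, (x * 255).int().clamp(0, 255) / 255)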
# modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
):
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 22 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize the shorter edge to size["shortest_edge"], keeping the aspect ratio
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
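# Added usage sketch (not part of the original file): run the processor on a
# dummy RGB image and check the (batch, channel, height, width) output shape.
# The class name CLIPImageProcessor matches the restored definition above.
def demo_clip_image_processor() -> None:
    processor = CLIPImageProcessor()
    dummy = PIL.Image.new("RGB", (320, 240))
    batch = processor(images=dummy, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)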
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
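# Added usage sketch (not part of the original file): for the default task the
# ONNX config declares batch/sequence dynamic axes for the three standard
# encoder inputs.
def demo_roformer_onnx_inputs() -> None:
    onnx_config = RoFormerOnnxConfig(RoFormerConfig())
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask", "token_type_ids"]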
| 22 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F'{directory}/configuration.json' )
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , '''w''' ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: list):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)
        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
@property
    def size(self):
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
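# Added usage sketch (not part of the original file): with 0-1 BFS a chain of
# zero-weight edges beats a direct edge of weight 1.
def demo_zero_one_bfs() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(3, 1, 0)
    assert g.get_shortest_path(0, 1) == 0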
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    # Remove trailing separator characters
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
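    # A minimal self-contained sketch of the joiner above (the clean names are
    # illustrative): the trailing separator is trimmed with strip(), which
    # removes separator *characters* from both ends of the result.
    def join_words(separator, separated):
        joined = ""
        for word in separated:
            joined += word + separator
        return joined.strip(separator)

    print(join_words("-", ["apple", "banana", "cherry"]))  # apple-banana-cherry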
| 22 |
'''simple docstring'''
from math import pi, sqrt
def snake_case_ (UpperCamelCase : float ):
'''simple docstring'''
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def snake_case_ ():
'''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
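# A minimal self-contained sketch of the same recursion with clean names
# (illustrative, separate from the function above): gamma(n) = (n - 1) * gamma(n - 1),
# bottoming out at gamma(1) = 1 and gamma(0.5) = sqrt(pi).
def gamma_sketch(x):
    if x == 0.5:
        return sqrt(pi )
    return 1.0 if x == 1 else (x - 1) * gamma_sketch(x - 1 )

assert gamma_sketch(4 ) == 6.0  # 3 * 2 * 1
assert abs(gamma_sketch(4.5 ) - 3.5 * 2.5 * 1.5 * 0.5 * sqrt(pi )) < 1e-9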
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case : Optional[Any] = 1.0
while num:
_snake_case : Dict = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
| 22 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = np.array(UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
_a = 0
_a = 0
_a = 0
_a = 0
# compute the shape of the output matrix
_a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
_a = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
_a = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_a = 0
_a = 0
return updated_arr
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = np.array(UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
_a = 0
_a = 0
_a = 0
_a = 0
# compute the shape of the output matrix
_a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
_a = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
_a = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_a = 0
_a = 0
return updated_arr
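# A small self-contained demonstration of the sliding-window arithmetic used
# above, on a 4x4 array with size=2 and stride=2 (the pool2d_sketch helper is
# illustrative, separate from the maxpooling/avgpooling functions in this file).
def pool2d_sketch(arr, size, stride, op=np.max):
    out_dim = (arr.shape[0] - size) // stride + 1
    out = np.zeros((out_dim, out_dim) )
    for i in range(out_dim):
        for j in range(out_dim):
            out[i, j] = op(arr[i * stride : i * stride + size, j * stride : j * stride + size] )
    return out

_demo = np.arange(16).reshape(4, 4)  # rows [0..3], [4..7], [8..11], [12..15]
assert (pool2d_sketch(_demo, 2, 2, np.max) == [[5, 7], [13, 15]]).all()
assert (pool2d_sketch(_demo, 2, 2, np.mean) == [[2.5, 4.5], [10.5, 12.5]]).all()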
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
_snake_case : str = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and avgpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 22 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {'add_prefix_space': True}
lowercase_ = False
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
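        # Illustrative note (not part of the original test): with the toy merges
        # above, "lower" after a leading space builds up as
        #   "\u0120 l" -> "\u0120l",  "\u0120l o" -> "\u0120lo",  "\u0120lo w" -> "\u0120low"
        # and "e r" -> "er", so " lower newer" tokenizes to
        #   ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        # -- "newer" stays character-by-character because no merge covers "\u0120n".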
def __lowerCAmelCase ( self : List[str] , **lowerCAmelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , **lowerCAmelCase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_a = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_a = tokens + [tokenizer.unk_token]
_a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
_a = '''lower newer'''
# Testing tokenization
_a = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_a = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
_a = tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing the unknown token
_a = tokens + [rust_tokenizer.unk_token]
_a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Any=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# Simple input
_a = '''This is a simple input'''
_a = ['''This is a simple input 1''', '''This is a simple input 2''']
_a = ('''This is a simple input''', '''This is a pair''')
_a = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_a = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_a = '''This is a simple input'''
_a = ['''This is a simple input looooooooong''', '''This is a simple input''']
_a = ('''This is a simple input''', '''This is a pair''')
_a = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_a = tokenizer.pad_token_id
_a = tokenizer(lowerCAmelCase_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
_a = tokenizer(*lowerCAmelCase_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_a = '''$$$'''
_a = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )
_a = '''This is a simple input'''
_a = ['''This is a simple input 1''', '''This is a simple input 2''']
_a = tokenizer.bos_token_id
_a = tokenizer(lowerCAmelCase_ )
_a = tokenizer(lowerCAmelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_a = tokenizer.decode(out_s.input_ids )
_a = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_a = [self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_a = '''Encode this.'''
_a = '''This one too please.'''
_a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_a = tokenizer.encode_plus(
lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , )
_a = encoded_sequence_dict['''input_ids''']
_a = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
_a = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_ )
]
_a = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_a = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCAmelCase_ )
_a = '''A photo of a cat'''
_a = tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
_a = AutoTokenizer.from_pretrained('''./test_opt''' )
_a = tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=lowerCAmelCase_ )
_a = '''A photo of a cat'''
_a = tokenizer.encode(
lowerCAmelCase_ , )
# Same as above
self.assertEqual(lowerCAmelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_a = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCAmelCase_ )
_a = '''bos'''
_a = tokenizer.get_vocab()['''bos''']
_a = '''A photo of a cat'''
_a = tokenizer.encode(
lowerCAmelCase_ , )
# We changed the bos token
self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
_a = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
_a = tokenizer.encode(
lowerCAmelCase_ , )
self.assertEqual(lowerCAmelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
def remove_articles(UpperCamelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : str ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
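# A self-contained sketch of the normalization + exact-match idea implemented
# above (the _norm_sketch helper name is illustrative): articles, punctuation,
# case and extra whitespace are stripped before prediction and reference compare.
def _norm_sketch(text):
    text = re.sub(R"\b(a|an|the)\b" , " " , text.lower() )
    text = "".join(ch for ch in text if ch not in set(string.punctuation) )
    return " ".join(text.split() )

_preds, _refs = ["The Cat!"], [["cat", "feline"]]
_hits = [any(_norm_sketch(p) == _norm_sketch(r) for r in refs) for p, refs in zip(_preds , _refs )]
assert 100 * sum(_hits) / len(_hits) == 100.0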
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
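# A compact self-contained sketch of the n-gram extraction performed by the
# loops above (the ngrams_sketch helper is illustrative): for each n in 1..4,
# collect the space-joined n-grams of a token list.
def ngrams_sketch(tokens , max_n=4 ):
    return {
        n: [" ".join(tokens[i : i + n] ) for i in range(len(tokens ) - n + 1 )]
        for n in range(1 , max_n + 1 )
    }

assert ngrams_sketch("about 95 species".split() )[2] == ["about 95", "95 species"]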
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
| 22 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
_a = abs(UpperCamelCase )
_a = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
_a = abs(UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
return sum(int(UpperCamelCase ) for c in str(abs(UpperCamelCase ) ) )
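# A self-contained illustration of the digit-sum idea above (independent of the
# collapsed parameter names in this file): the loop and str() variants agree,
# including on negative input thanks to abs().
def _digit_sum_loop(number):
    number, total = abs(number ), 0
    while number > 0:
        total += number % 10
        number //= 10
    return total

assert _digit_sum_loop(-1_2345 ) == sum(int(c ) for c in str(1_2345 ) ) == 15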
def snake_case_ ():
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase : Callable , UpperCamelCase : int ) -> None:
_a = f'{func.__name__}({value})'
_a = timeit(f'__main__.{call}' , setup='''import __main__''' )
print(f'{call:56} = {func(UpperCamelCase )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(UpperCamelCase )
return images
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
_a = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
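# A minimal standalone illustration of numpy_to_pil's uint8 conversion above
# (the random batch is a stand-in for decoded diffusion output already in [0, 1]).
import numpy as np

_batch = np.random.rand(2 , 8 , 8 , 3 )  # NHWC floats in [0, 1]
_pils = [Image.fromarray((img * 255).round().astype('''uint8''' ) ) for img in _batch]
assert len(_pils ) == 2 and _pils[0].size == (8, 8)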
| 22 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = 0
_a = len(UpperCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_a = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase ):
return None
_a = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
_a = left
_a = point
elif point > right:
_a = right
_a = point
else:
if item < current_item:
_a = point - 1
else:
_a = point + 1
return None
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_a = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase , UpperCamelCase , UpperCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase , UpperCamelCase , point + 1 , UpperCamelCase )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
if collection != sorted(UpperCamelCase ):
        raise ValueError('''Collection must be sorted in ascending order''' )
return True
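# Illustrative probe arithmetic (standalone): interpolation search guesses an
# index by linearly interpolating the target between the endpoint values.
_arr, _item = [10, 30, 40, 45, 50, 66, 77, 93], 67
_left, _right = 0, len(_arr ) - 1
_probe = _left + ((_item - _arr[_left]) * (_right - _left)) // (_arr[_right] - _arr[_left])
assert _probe == 4 and _arr[_probe] == 50  # undershoot, so the search moves right of 4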
if __name__ == "__main__":
import sys
_snake_case : int = 0
if debug == 1:
_snake_case : Optional[int] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
        sys.exit('Sequence must be sorted in ascending order to apply interpolation search')
_snake_case : Tuple = 67
_snake_case : Tuple = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
| 22 |
'''simple docstring'''
import requests
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = {'''Content-Type''': '''application/json'''}
_a = requests.post(UpperCamelCase , json={'''text''': message_body} , headers=UpperCamelCase )
if response.status_code != 200:
_a = (
'''Request to slack returned an error '''
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
_snake_case : Tuple = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
_a = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_a = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=UpperCamelCase , output_all_encodings=UpperCamelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , UpperCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_a = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
_a = os.path.join(get_home_dir() , '''models''' )
_a = _load_vocab(UpperCamelCase , UpperCamelCase , UpperCamelCase , cls=UpperCamelCase )
_a = nlp.model.BERTModel(
UpperCamelCase , len(UpperCamelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=UpperCamelCase , use_token_type_embed=UpperCamelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=UpperCamelCase , use_decoder=UpperCamelCase , )
original_bort.load_parameters(UpperCamelCase , cast_dtype=UpperCamelCase , ignore_extra=UpperCamelCase )
_a = original_bort._collect_params_with_prefix()
# Build our config 🤗
_a = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(UpperCamelCase ),
}
_a = BertConfig.from_dict(UpperCamelCase )
_a = BertForMaskedLM(UpperCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(UpperCamelCase : Tuple ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(UpperCamelCase : Any , UpperCamelCase : List[str] ):
_a = hf_param.shape
_a = to_torch(params[gluon_param] )
_a = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
_a = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
_a = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
_a = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
_a = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_a = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_a = hf_bort_model.bert.encoder.layer[i]
# self attention
_a = layer.attention.self
_a = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
_a = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
_a = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
_a = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
_a = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
_a = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
_a = layer.attention.output
_a = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
_a = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
_a = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
_a = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
_a = layer.intermediate
_a = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
_a = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
_a = layer.output
_a = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
_a = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
_a = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
_a = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_a = RobertaTokenizer.from_pretrained('''roberta-base''' )
_a = tokenizer.encode_plus(UpperCamelCase )['''input_ids''']
# Get gluon output
_a = mx.nd.array([input_ids] )
_a = original_bort(inputs=UpperCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(UpperCamelCase )
_a = BertModel.from_pretrained(UpperCamelCase )
hf_bort_model.eval()
_a = tokenizer.encode_plus(UpperCamelCase , return_tensors='''pt''' )
_a = hf_bort_model(**UpperCamelCase )[0]
_a = output_gluon[0].asnumpy()
_a = output_hf[0].detach().numpy()
_a = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_a = np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 )
if success:
        print('''✔️ Both models do output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , UpperCamelCase )
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : Any = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
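        # Illustrative sizing note (not part of the class): with
        # size={"shortest_edge": 256}, a 480x640 input is scaled by 256/480,
        # i.e. to roughly 256x341 -- the short side lands on 256 and the aspect
        # ratio is preserved ahead of the 224x224 center crop applied later.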
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 22 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case : Any = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_snake_case : Optional[int] = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = 'mask2former'
lowercase_ = ['swin']
lowercase_ = {'hidden_size': 'hidden_dim'}
def __init__( self : Any , lowerCAmelCase_ : Optional[Dict] = None , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 10_24 , lowerCAmelCase_ : str = "relu" , lowerCAmelCase_ : int = 6 , lowerCAmelCase_ : int = 10 , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 20_48 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 2_55 , lowerCAmelCase_ : int = 1_00 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 2.0 , lowerCAmelCase_ : float = 5.0 , lowerCAmelCase_ : float = 5.0 , lowerCAmelCase_ : int = 1_25_44 , lowerCAmelCase_ : float = 3.0 , lowerCAmelCase_ : float = 0.7_5 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1.0 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : List[int] = [4, 8, 16, 32] , lowerCAmelCase_ : bool = None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
_a = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCAmelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = backbone_config.pop('''model_type''' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(lowerCAmelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
F'Supported model types: {",".join(self.backbones_supported )}' )
_a = backbone_config
_a = feature_size
_a = mask_feature_size
_a = hidden_dim
_a = encoder_feedforward_dim
_a = activation_function
_a = encoder_layers
_a = decoder_layers
_a = num_attention_heads
_a = dropout
_a = dim_feedforward
_a = pre_norm
_a = enforce_input_projection
_a = common_stride
_a = ignore_value
_a = num_queries
_a = no_object_weight
_a = class_weight
_a = mask_weight
_a = dice_weight
_a = train_num_points
_a = oversample_ratio
_a = importance_sample_ratio
_a = init_std
_a = init_xavier_std
_a = use_auxiliary_loss
_a = feature_strides
_a = output_auxiliary_logits
_a = decoder_layers
super().__init__(**lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : Any , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
return cls(
backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ , )
def __lowerCAmelCase ( self : int ) -> Dict[str, any]:
"""simple docstring"""
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 22 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
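    # Each generator below yields (features_dict, label_id) pairs consumed by tf.data.Dataset.from_generator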
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(UpperCamelCase )
return results
if __name__ == "__main__":
main()
| 22 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A :
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=14 , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Optional[int]=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Union[str, Any]=0.0_2 , ) -> Dict:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = rotary_dim
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = None
_a = vocab_size - 1
_a = vocab_size - 1
_a = vocab_size - 1
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = 20
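        # max_decoder_length bounds the autoregressive cache pre-allocated by init_cache below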
_a = model_class_name(lowerCAmelCase_ )
_a = model.init_cache(input_ids.shape[0] , lowerCAmelCase_ )
_a = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_a = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_a = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , )
_a = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
_a = model(
input_ids[:, -1:] , attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase_ , )
_a = model(lowerCAmelCase_ )
_a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
_a = 20
_a = model_class_name(lowerCAmelCase_ )
_a = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
_a = model.init_cache(input_ids.shape[0] , lowerCAmelCase_ )
_a = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_a = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , )
_a = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
_a = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , )
_a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class A ( _a ,_a ,unittest.TestCase ):
lowercase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
_a = FlaxGPTJModelTester(self )
def __lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@tooslow
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_a = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
_a = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )
_a = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
_a = False
_a = model.config.eos_token_id
_a = jax.jit(model.generate )
_a = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
_a = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_a = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_a = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a = pt_inputs['''input_ids'''].shape
_a = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
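                # Zero out a random prefix of each attention mask so PT and Flax receive identical padded inputs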
for batch_idx, start_index in enumerate(lowerCAmelCase_ ):
_a = 0
_a = 1
_a = 0
_a = 1
_a = pt_model_class(lowerCAmelCase_ ).eval()
_a = model_class(lowerCAmelCase_ , dtype=jnp.floataa )
_a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase_ )
_a = fx_state
with torch.no_grad():
_a = pt_model(**lowerCAmelCase_ ).to_tuple()
_a = fx_model(**lowerCAmelCase_ ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase_ )
_a = model_class.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
_a = fx_model_loaded(**lowerCAmelCase_ ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded ) , len(pt_outputs ) , '''Output lengths differ between Flax and PyTorch''' )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_a = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_a = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_a = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_a = pt_model_class(lowerCAmelCase_ ).eval()
_a = model_class(lowerCAmelCase_ , dtype=jnp.floataa )
_a = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , fx_model.params )
_a , _a = pt_inputs['''input_ids'''].shape
_a = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase_ ):
_a = 0
_a = 1
_a = 0
_a = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
_a = pt_model(**lowerCAmelCase_ ).to_tuple()
_a = fx_model(**lowerCAmelCase_ ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase_ )
_a = pt_model_class.from_pretrained(lowerCAmelCase_ , from_flax=lowerCAmelCase_ )
with torch.no_grad():
_a = pt_model_loaded(**lowerCAmelCase_ ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs ) , len(pt_outputs_loaded ) , '''Output lengths differ between Flax and PyTorch''' )
                    for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_a = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
_a = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase_ )
| 22 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
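        # Expected ids for "A long paragraph for summarization.", including the <s> ... </s> special tokens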
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
            self.assertListEqual(result , expected_src_tokens )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 22 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class A ( _a ,_a ):
lowercase_ = 'pixel_values'
lowercase_ = False
lowercase_ = TimmBackboneConfig
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(self , '''timm''' )
super().__init__(lowerCAmelCase_ )
_a = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowerCAmelCase_ , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
_a = getattr(lowerCAmelCase_ , '''use_pretrained_backbone''' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
_a = config.out_indices if getattr(lowerCAmelCase_ , '''out_indices''' , lowerCAmelCase_ ) is not None else (-1,)
_a = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_a = self._backbone.return_layers
        _a = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : Any , lowerCAmelCase_ : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
_a = kwargs.pop('''config''' , TimmBackboneConfig() )
_a = kwargs.pop('''use_timm_backbone''' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
_a = kwargs.pop('''num_channels''' , config.num_channels )
_a = kwargs.pop('''features_only''' , config.features_only )
_a = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
_a = kwargs.pop('''out_indices''' , config.out_indices )
_a = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Any ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
"""simple docstring"""
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_a = self._all_layers
_a = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self._return_layers
_a = tuple(hidden_states[i] for i in self.out_indices )
else:
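            # In features_only mode the backbone already returns just the layers selected by out_indices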
_a = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = None
_a = tuple(lowerCAmelCase_ )
_a = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
_a = (feature_maps,)
if output_hidden_states:
_a = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
| 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def snake_case_ (UpperCamelCase : SplitDict ):
'''simple docstring'''
_a = split_dict._to_yaml_list()
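    # Round-tripping through the YAML list representation must preserve every split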
    assert len(split_dict_yaml_list ) == len(split_dict )
    _a = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_a = None
# the split name of split_dict takes over the name of the split info object
_a = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
_a = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 22 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_a = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]:
"""simple docstring"""
_a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
_a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
_a = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
_a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
| 22 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
_a = int(UpperCamelCase )
if n_element < 1:
_a = ValueError('''a should be a positive number''' )
raise my_error
_a = [1]
_a , _a , _a = (0, 0, 0)
_a = 1
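    # Three-pointer construction: every Hamming number after 1 is a previous
    # Hamming number multiplied by 2, 3 or 5; each pointer is advanced past
    # candidates that are no longer larger than the last number produced.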
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_snake_case : Tuple = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_snake_case : Any = hamming(int(n))
print('-----------------------------------------------------')
print(F'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Union[str, Any] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = SqueezeBertTokenizer
def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
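        # Rebuild the backend normalizer when the stored options disagree with the requested ones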
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
    def __lowerCAmelCase ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        _a = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def __lowerCAmelCase ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A :
def __init__( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[str]=10 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Dict=32 * 8 , lowerCAmelCase_ : Tuple=32 * 8 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : int=64 , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = is_training
_a = use_auxiliary_loss
_a = num_queries
_a = num_channels
_a = min_size
_a = max_size
_a = num_labels
_a = hidden_dim
_a = hidden_dim
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase_ )
_a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase_ )
_a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase_ ) > 0.5
).float()
_a = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase_ ) > 0.5).long()
_a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_a = self.num_queries
_a = self.num_labels
_a = [1, 1, 1, 1]
_a = self.num_channels
_a = 64
_a = 1_28
_a = self.hidden_dim
_a = self.hidden_dim
_a = self.hidden_dim
return config
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
_a , _a , _a , _a , _a = self.prepare_config_and_inputs()
_a = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Any:
"""simple docstring"""
_a = output.encoder_hidden_states
_a = output.pixel_decoder_hidden_states
_a = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase_ ) , config.decoder_layers )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : int=False ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
_a = MaskaFormerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> str:
"""simple docstring"""
_a = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
def comm_check_on_output(lowerCAmelCase_ : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a = model(pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )
comm_check_on_output(lowerCAmelCase_ )
_a = model(
pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ )
comm_check_on_output(lowerCAmelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A ( _a ,_a ,unittest.TestCase ):
lowercase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase_ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = MaskaFormerModelTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase_ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a = MaskaFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = (self.model_tester.min_size,) * 2
_a = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase_ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase_ ).long(),
}
_a = self.model_tester.get_config()
_a = MaskaFormerForUniversalSegmentation(lowerCAmelCase_ ).to(lowerCAmelCase_ )
_a = model(**lowerCAmelCase_ )
self.assertTrue(outputs.loss is not None )
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ )
_a = model(**lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
self.assertTrue(outputs.attentions is not None )
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
if not self.model_tester.is_training:
return
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_a = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ).loss
loss.backward()
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = True
_a = True
_a = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ )
model.train()
_a = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ )
_a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_snake_case : List[str] = 1E-4
def snake_case_ ():
'''simple docstring'''
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
_a = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase_ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
_a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
_a = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
_a = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
_a = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase_ ).eval()
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
_a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
# masks_queries_logits
_a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_a = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
# class_queries_logits
_a = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_a = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
_a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase_ ).eval()
_a = self.default_image_processor
_a = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_a = inputs['''pixel_values'''].to(lowerCAmelCase_ )
_a = [el.to(lowerCAmelCase_ ) for el in inputs['''mask_labels''']]
_a = [el.to(lowerCAmelCase_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
self.assertTrue(outputs.loss is not None )
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
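# Minimal usage sketch (hypothetical, not part of the original file): assuming
# PIL is installed and this class is importable under its name here, a single
# image could be preprocessed as:
#
#   from PIL import Image
#   processor = A()  # defaults: resize + center-crop to 224x224, rescale, normalize
#   batch = processor(Image.open("cat.png"), return_tensors="np")  # "cat.png" is a placeholder
#   batch["pixel_values"].shape  # expected: (1, 3, 224, 224)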
| 22 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( _a ,unittest.TestCase ):
lowercase_ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Optional[int]=0 ) -> Dict:
"""simple docstring"""
_a = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(lowerCAmelCase_ ) )
_a = np.random.RandomState(lowerCAmelCase_ )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# warmup pass to apply optimizations
_a = pipe(**self.get_dummy_inputs() )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs()
_a = pipe(**lowerCAmelCase_ ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_a = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_a = ort.SessionOptions()
_a = False
return options
def __lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_a = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A fantasy landscape, trending on artstation'''
_a = np.random.RandomState(0 )
_a = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type='''np''' , )
_a = output.images
_a = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
_a = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_a = init_image.resize((7_68, 5_12) )
_a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A fantasy landscape, trending on artstation'''
_a = np.random.RandomState(0 )
_a = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase_ , output_type='''np''' , )
_a = output.images
_a = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
_a = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
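    # The `_LazyModule` above keeps `import` of this package cheap: the torch/tf
    # submodules listed in `_import_structure` are only loaded when one of their
    # attributes is first accessed.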
| 22 | 1 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A ( _a ):
lowercase_ = 'char'
lowercase_ = 'bpe'
lowercase_ = 'wp'
_snake_case : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A ( _a ):
lowercase_ = ['image_processor', 'char_tokenizer']
lowercase_ = 'ViTImageProcessor'
lowercase_ = 'MgpstrTokenizer'
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase_ , )
_a = kwargs.pop('''feature_extractor''' )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
_a = tokenizer
_a = AutoTokenizer.from_pretrained('''gpt2''' )
_a = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self : str , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
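        # dispatch to the image processor and/or the character-level tokenizer,
        # depending on which of the two inputs was provided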
if images is not None:
_a = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None:
_a = self.char_tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_a = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a , _a , _a = sequences
_a = char_preds.size(0 )
_a , _a = self._decode_helper(lowerCAmelCase_ , '''char''' )
_a , _a = self._decode_helper(lowerCAmelCase_ , '''bpe''' )
_a , _a = self._decode_helper(lowerCAmelCase_ , '''wp''' )
_a = []
_a = []
for i in range(lowerCAmelCase_ ):
_a = [char_scores[i], bpe_scores[i], wp_scores[i]]
_a = [char_strs[i], bpe_strs[i], wp_strs[i]]
_a = scores.index(max(lowerCAmelCase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_a = {}
_a = final_strs
_a = final_scores
_a = char_strs
_a = bpe_strs
_a = wp_strs
return out
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Dict:
"""simple docstring"""
if format == DecodeType.CHARACTER:
_a = self.char_decode
_a = 1
_a = '''[s]'''
elif format == DecodeType.BPE:
_a = self.bpe_decode
_a = 2
_a = '''#'''
elif format == DecodeType.WORDPIECE:
_a = self.wp_decode
_a = 1_02
_a = '''[SEP]'''
else:
raise ValueError(F'Format {format} is not supported.' )
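        # greedy top-1 decode: take the best token at each position, convert ids
        # to strings, cut each sequence at its first EOS marker, and score it by
        # the cumulative product of the per-token max probabilities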
_a , _a = [], []
_a = pred_logits.size(0 )
_a = pred_logits.size(1 )
_a , _a = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase_ , sorted=lowerCAmelCase_ )
_a = preds_index.view(-1 , lowerCAmelCase_ )[:, 1:]
_a = decoder(lowerCAmelCase_ )
_a , _a = torch.nn.functional.softmax(lowerCAmelCase_ , dim=2 ).max(dim=2 )
_a = preds_max_prob[:, 1:]
for index in range(lowerCAmelCase_ ):
_a = preds_str[index].find(lowerCAmelCase_ )
_a = preds_str[index][:pred_eos]
_a = preds_index[index].cpu().tolist()
_a = pred_index.index(lowerCAmelCase_ ) if eos_token in pred_index else -1
_a = preds_max_prob[index][: pred_eos_index + 1]
_a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCAmelCase_ )
conf_scores.append(lowerCAmelCase_ )
return dec_strs, conf_scores
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_a = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase_ )]
return decode_strs
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase_ )]
return decode_strs
| 22 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _a ):
lowercase_ = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
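        # flatten the (variant, batch) dims so all three perturbed copies are
        # denoised at their respective timesteps in a single batched call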
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_a = 1
_a = 3
_a = (32, 32)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCAmelCase_ )
@property
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
def extract(*lowerCAmelCase_ : str , **lowerCAmelCase_ : List[Any] ):
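            # minimal stub standing in for a feature extractor: builds an object
            # that carries an empty `pixel_values` tensor and supports `.to(device)`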
class A :
def __init__( self : Dict ) -> int:
"""simple docstring"""
_a = torch.ones([0] )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase_ )
return self
return Out()
return extract
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet
_a = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a = output.images
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a = output.images
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(pipe.scheduler , lowerCAmelCase_ )
assert pipe.safety_checker is None
_a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
_a = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
_a = unet.half()
_a = vae.half()
_a = bert.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ )
_a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
_a = 40_03_66_03_46
_a = 7
# without safety guidance (sld_guidance_scale = 0)
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ )
_a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''padme amidala taking a bath artwork, safe for work, no nudity'''
_a = 27_34_97_17_55
_a = 7
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
_a = 10_44_35_52_34
_a = 12
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ (UpperCamelCase : dict ):
'''simple docstring'''
    return (UpperCamelCase["data"], UpperCamelCase["target"])
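# For the standard scikit-learn California housing bunch this yields a
# (20640, 8) feature matrix and a (20640,) target of median house values
# (shapes assume the unmodified upstream dataset).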
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
_a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase , UpperCamelCase )
# Predict target for test data
_a = xgb.predict(UpperCamelCase )
_a = predictions.reshape(len(UpperCamelCase ) , 1 )
return predictions
def snake_case_ ():
'''simple docstring'''
_a = fetch_california_housing()
_a , _a = data_handling(UpperCamelCase )
_a , _a , _a , _a = train_test_split(
UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
_a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Error printing
print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' )
print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A :
def __init__( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : int=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Tuple=9 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : List[str]=32 , lowerCAmelCase_ : Optional[int]=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Tuple=37 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any=0.0_0_2 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig.from_pretrained('''google/umt5-base''' )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCAmelCase_ )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase_ )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, input_dict
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
_a = UMTaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(
input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , )
_a = model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCAmelCase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
_a = UMTaModel(config=lowerCAmelCase_ ).get_decoder().to(lowerCAmelCase_ ).eval()
# first forward pass
_a = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) )
self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) + 1 )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(lowerCAmelCase_ )['''last_hidden_state''']
_a = model(lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , ) -> Optional[int]:
"""simple docstring"""
_a = UMTaModel(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).half().eval()
_a = model(**lowerCAmelCase_ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(lowerCAmelCase_ ).any().item() )
@require_torch
class A ( _a ,_a ,_a ,unittest.TestCase ):
lowercase_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase_ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = True
lowercase_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase_ = [0.8, 0.9]
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCAmelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=lowerCAmelCase_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(lowerCAmelCase_ ).eval()
model.to(lowerCAmelCase_ )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=lowerCAmelCase_ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase_ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase_ ),
}
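        # zero out one mask type at a time and verify that the matching
        # attention weights in the generated output are exactly zero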
for attn_name, (name, mask) in zip(lowerCAmelCase_ , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCAmelCase_ )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=lowerCAmelCase_ , return_dict_in_generate=lowerCAmelCase_ , **lowerCAmelCase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=lowerCAmelCase_ ).to(lowerCAmelCase_ )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=lowerCAmelCase_ , legacy=lowerCAmelCase_ )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        sequences = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(sequences )
        self.assertEqual(filling , EXPECTED_FILLING )
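if __name__ == "__main__":
    # --- Added illustration (not part of the original test file): a minimal
    # sketch of the property the head-masking test above asserts. A head mask
    # of zeros multiplies that head's attention weights away, so the attention
    # tensors returned by generate() sum to exactly 0.0.
    import torch

    attn = torch.softmax(torch.randn(2 , 4 , 5 , 5 ) , dim=-1 )   # (batch, heads, query, key)
    masked = attn * torch.zeros(4 ).view(1 , -1 , 1 , 1 )          # one mask entry per head
    print(masked.sum().item() )  # 0.0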
| 22 |
'''simple docstring'''
import qiskit
def half_adder (bita : int , bitb : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
qc_ha.x(1 )
qc_ha.barrier()
    # use CNOT gates to write the XOR of the inputs on qubit 2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
    # use a CCX (Toffoli) gate to write the AND of the inputs on qubit 3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the Aer simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
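    # --- Added illustration (not part of the original file): classical
    # cross-check. The measured bitstring is '<carry><sum>', so for inputs
    # (1, 1) the dominant count reported by the simulator should be '10'.
    for bit_a in (0, 1):
        for bit_b in (0, 1):
            expected = f"{bit_a & bit_b}{bit_a ^ bit_b}"
            print(f"{bit_a} + {bit_b} -> expected measurement {expected}")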
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
def allocation_num (number_of_bytes : int , partitions : int ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
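    # --- Added illustration (not part of the original file): 100 bytes over
    # 3 partitions -- equal shares, the last partition absorbs the remainder.
    print(allocation_num(100 , 3 ))  # ['1-33', '34-66', '67-100']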
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ):
    '''simple docstring'''
    if len(string_aa ) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = B''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i , '''08x''' )[-8:]
    little_endian_hex = B''''''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def preprocess (message : bytes ):
    '''simple docstring'''
    bit_string = B''''''
    for char in message:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words (bit_string : bytes ):
    '''simple docstring'''
    if len(bit_string ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa (i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i , '''032b''' )
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ):
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_aa (i : int , shift : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me (message : bytes ):
    '''simple docstring'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67452301
    ba = 0xefcdab89
    ca = 0x98badcfe
    da = 0x10325476
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-bit words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
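    # --- Added illustration (not part of the original file): cross-check
    # against the standard library. md5_me returns the little-endian hex
    # digest as bytes, so one comparison exercises every helper above
    # (padding, little-endian packing, per-round mixing) at once.
    import hashlib

    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message ) == hashlib.md5(message ).hexdigest().encode('''utf-8''' )
    print(md5_me(message ))  # b'9e107d9d372bb6826bd81d3542a419d6'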
| 22 | 1 |
'''simple docstring'''
def max_product (numbers : list[int] ):
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
return max_prod
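if __name__ == "__main__":
    # --- Added illustration (not part of the original file): a negative
    # number swaps the roles of the running maximum and minimum products,
    # which is why both are tracked above.
    print(max_product([2, 3, -2, 4] ))   # 6   (subarray [2, 3])
    print(max_product([-4, -3, -2] ))   # 12  (subarray [-4, -3])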
| 22 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
    def __init__( self : Tuple , parent : Union[str, Any] , batch_size : int=7 , num_channels : int=3 , image_size : int=18 , min_resolution : int=30 , max_resolution : int=4_00 , do_resize : bool=True , size : dict=None , do_normalize : bool=True , ) -> None:
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict ( self : Dict ) -> dict:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( _a ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
    def setUp ( self : List[Any] ) -> None:
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict ( self : Tuple ) -> dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ():
'''simple docstring'''
_a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    imagea = Image.open(dataset[4]['''file'''] )
    imageb = Image.open(dataset[5]['''file'''] )
    images = [imagea, imageb]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_a = prepare_images()
# test non-batched
_a = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
_a = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
_a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
_a = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
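if __name__ == "__main__":
    # --- Added illustration (not part of the original test file): what the
    # `clusters` array above is for. ImageGPT-style preprocessing maps every
    # pixel to the index of its nearest color cluster, turning an image into
    # a sequence of token ids. A sketch, not the exact library code.
    clusters = np.asarray([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]] )   # (n_clusters, 3)
    pixels = np.random.uniform(-1 , 1 , size=(4, 3) )                    # 4 RGB pixels in [-1, 1]
    # squared distance from every pixel to every cluster, then argmin per pixel
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1 )
    print(distances.argmin(-1 ))  # e.g. [0 1 1 0]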
| 22 | 1 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class A :
def __init__( self : Tuple , lowerCAmelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_a = deepcopy(lowerCAmelCase_ )
elif os.path.exists(lowerCAmelCase_ ):
with io.open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as f:
_a = json.load(lowerCAmelCase_ )
else:
try:
_a = baseaa.urlsafe_baadecode(lowerCAmelCase_ ).decode('''utf-8''' )
_a = json.loads(lowerCAmelCase_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
_a = config
self.set_stage_and_offload()
    def set_stage_and_offload ( self : int ) -> None:
        """simple docstring"""
        self._stage = self.get_value('''zero_optimization.stage''' , -1 )
        # offload
        self._offload = False
        if self._stage in (2, 3):
            offload_devices_valid = set(['''cpu''', '''nvme'''] )
            offload_devices = set(
                [
                    self.get_value('''zero_optimization.offload_optimizer.device''' ),
                    self.get_value('''zero_optimization.offload_param.device''' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node ( self : Tuple , ds_key_long : str ) -> tuple:
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value ( self : int , ds_key_long : str , default : Optional[Any]=None ) -> Optional[Any]:
        """simple docstring"""
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str=False ) -> Tuple:
"""simple docstring"""
_a = self.config
# find the config node of interest if it exists
_a = ds_key_long.split('''.''' )
for node in nodes:
_a = config
_a = config.get(lowerCAmelCase_ )
if config is None:
if must_exist:
raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
_a = self.get_value(lowerCAmelCase_ )
return False if value is None else bool(lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Tuple ) -> Dict:
"""simple docstring"""
_a = self.get_value(lowerCAmelCase_ )
return False if value is None else not bool(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self._stage == 2
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
return self._stage == 3
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self._offload
class A :
    def __init__( self : int , engine : Any ) -> None:
        """simple docstring"""
        self.engine = engine
    def backward ( self : str , loss : List[Any] , **kwargs : str ) -> None:
        """simple docstring"""
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class A ( _a ):
    def __init__( self : Optional[int] , optimizer : Any ) -> None:
        """simple docstring"""
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , '''overflow''' )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Any=None ) -> Optional[int]:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class A ( _a ):
def __init__( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ) -> int:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class A :
    def __init__( self : Optional[Any] , params : Tuple , lr : float=0.001 , weight_decay : float=0 , **kwargs : Any ) -> None:
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class A :
    def __init__( self : Any , optimizer : Union[str, Any] , total_num_steps : Optional[int]=None , warmup_num_steps : int=0 , **kwargs : Any ) -> None:
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
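if __name__ == "__main__":
    # --- Added illustration (not part of the original file): the dotted-key
    # lookup implemented by find_config_node/get_value above, as a standalone
    # sketch over a plain nested dict (the real class wraps a DeepSpeed
    # config in exactly this way).
    def get_value_sketch(config , ds_key_long , default=None ):
        *nodes, ds_key = ds_key_long.split('''.''' )
        for node in nodes:
            config = config.get(node )
            if config is None:
                return default
        return config.get(ds_key , default )

    sample = {'''zero_optimization''': {'''stage''': 3, '''offload_param''': {'''device''': '''cpu'''}}}
    print(get_value_sketch(sample , '''zero_optimization.offload_param.device''' ))                    # cpu
    print(get_value_sketch(sample , '''zero_optimization.offload_optimizer.device''' , '''none''' ))  # none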
| 22 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
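if __name__ == "__main__":
    # --- Added illustration (not part of the original test): the score above
    # is a length-normalized negative log-likelihood. The same arithmetic
    # with tiny fake logits, using only NumPy (real shapes are
    # [batch, seq_len, vocab]).
    import numpy as np

    logits = np.array([[[2.0, 0.5, -1.0], [0.1, 1.5, 0.3]]] )   # (batch, seq, vocab)
    labels = np.array([[0, 1]] )                                 # (batch, seq)
    log_probs = logits - np.log(np.exp(logits ).sum(-1 , keepdims=True ) )
    token_nll = -np.take_along_axis(log_probs , labels[..., None] , axis=-1 ).squeeze(-1 )
    loss = token_nll.mean()                  # == optax.softmax_cross_entropy(...).mean()
    print(-(labels.shape[-1] * loss ))       # the mtf_score convention used above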
| 22 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule (scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule (scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ )
_a = torch.tensor([0.4, 0.2, -0.5] )
_a = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_a = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
_a = criterion(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ )
_a = torch.tensor([0.4, 0.2, -0.5] )
_a = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_a = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase_ , weight_decay=0.0 , relative_step=lowerCAmelCase_ , scale_parameter=lowerCAmelCase_ , warmup_init=lowerCAmelCase_ , )
for _ in range(10_00 ):
_a = criterion(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class A ( unittest.TestCase ):
lowercase_ = nn.Linear(50 ,50 ) if is_torch_available() else None
lowercase_ = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None
lowercase_ = 10
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ , msg=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_a = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
_a , _a = data
_a = scheduler_func(self.optimizer , **lowerCAmelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_a = unwrap_schedule(lowerCAmelCase_ , self.num_steps )
self.assertListAlmostEqual(
lowerCAmelCase_ , lowerCAmelCase_ , tol=1e-2 , msg=F'failed for {scheduler_func} in normal scheduler' , )
_a = scheduler_func(self.optimizer , **lowerCAmelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase_ ) # wrap to test picklability of the schedule
_a = unwrap_and_save_reload_schedule(lowerCAmelCase_ , self.num_steps )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ , msg=F'failed for {scheduler_func} in save and reload' )
class LambdaScheduleWrapper :
    def __init__( self : Optional[Any] , fn : Tuple ) -> None:
        """simple docstring"""
        self.fn = fn
    def __call__( self : List[str] , *args : Optional[int] , **kwargs : Dict ) -> Optional[Any]:
        """simple docstring"""
        return self.fn(*args , **kwargs )
@classmethod
    def wrap_scheduler ( cls , scheduler ) -> None:
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
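if __name__ == "__main__":
    # --- Added illustration (not part of the original test file): the
    # expected-LR table for get_linear_schedule_with_warmup above, reproduced
    # by a plain function (base_lr=10, num_warmup_steps=2,
    # num_training_steps=10).
    def linear_warmup_decay(step , warmup=2 , total=10 ):
        if step < warmup:
            return step / warmup                           # linear warmup ramp
        return max(0.0 , (total - step) / (total - warmup) )  # linear decay to 0

    print([round(10.0 * linear_warmup_decay(s ) , 2 ) for s in range(10 )] )
    # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]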
| 22 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def decimal_to_bits (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , '''b c d h w -> b (c d) h w''' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the derivation
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_a = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_a = self.alphas_cumprod[timestep]
_a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_a = self._get_variance(UpperCamelCase , UpperCamelCase )
_a = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu'''
_a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
_a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
_a = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_a = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
_a = None
# 1. compute alphas, betas
_a = self.alphas_cumprod[t]
_a = self.alphas_cumprod[t - 1] if t > 0 else self.one
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_a = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_a = 0
if t > 0:
_a = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
_a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int:
"""simple docstring"""
super().__init__()
_a = bit_scale
_a = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_a = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_a = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_a = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
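if __name__ == "__main__":
    # --- Added illustration (not part of the original file): round trip of
    # the bit codec above. Pixels quantized to 0..255 survive
    # decimal_to_bits -> bits_to_decimal exactly, which is what lets the
    # diffusion model operate on sign bits instead of raw intensities.
    sample = torch.rand(1 , 3 , 2 , 2 )
    bits = decimal_to_bits(sample )        # (1, 24, 2, 2), entries in {-1, +1}
    restored = bits_to_decimal(bits )      # (1, 3, 2, 2), entries k / 255
    assert torch.allclose(restored , (sample * 255).int().clamp(0 , 255 ) / 255 )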
| 22 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : Optional[Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = ['CLIPFeatureExtractor']
_snake_case : int = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
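if __name__ == "__main__":
    # --- Added illustration (not part of the original file): the _LazyModule
    # idea in miniature via a lookup table -- heavy imports are deferred until
    # a symbol is first requested. A generic sketch, not the transformers
    # implementation.
    import importlib

    lazy_map = {'''sqrt''': '''math''', '''dumps''': '''json'''}

    def lazy_getattr(name ):
        if name in lazy_map:
            return getattr(importlib.import_module(lazy_map[name] ) , name )
        raise AttributeError(name )

    print(lazy_getattr('''sqrt''' )(16 ) )  # 4.0 -- math is imported only now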
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A ( _a ):
lowercase_ = 'roformer'
def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size if embedding_size is None else embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = rotary_value
_a = use_cache
class A ( _a ):
@property
def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
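if __name__ == "__main__":
    # --- Added illustration (not part of the original file): the rotary
    # position embedding behind the `rotary_value` flag above. Each pair of
    # feature dimensions is rotated by a position-dependent angle. A NumPy
    # sketch, not the library implementation.
    import numpy as np

    def rotary(x , base=10000.0 ):
        seq_len , dim = x.shape
        inv_freq = 1.0 / base ** (np.arange(0 , dim , 2 ) / dim )
        theta = np.arange(seq_len )[:, None] * inv_freq[None, :]   # (seq, dim/2)
        cos , sin = np.cos(theta ) , np.sin(theta )
        xa , xb = x[:, 0::2] , x[:, 1::2]
        out = np.empty_like(x )
        out[:, 0::2] = xa * cos - xb * sin    # 2-D rotation of each dim pair
        out[:, 1::2] = xa * sin + xb * cos
        return out

    print(rotary(np.ones((4, 8) ) ).round(2 ) )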
| 22 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'The output directory where the model will be written.'} ,)
lowercase_ = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization. '
'Don\'t set if you want to train an encoder model from scratch.'
)
} ,)
lowercase_ = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization. '
'Don\'t set if you want to train a decoder model from scratch.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments,) )
((_a) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_a = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_a = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_a = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_a = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_a = True
_a = True
_a = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=UpperCamelCase , decoder_config=UpperCamelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_a = decoder_config.decoder_start_token_id
_a = decoder_config.pad_token_id
if decoder_start_token_id is None:
_a = decoder_config.bos_token_id
if pad_token_id is None:
_a = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_a = decoder_config.eos_token_id
_a = decoder_start_token_id
_a = pad_token_id
_a = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_a = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_a = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
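# --- Added illustration (not part of the original script): the bos/eos
# fallback above in miniature. GPT-2-style decoders define only bos/eos
# tokens, so decoder_start and pad ids are borrowed from them. Toy namespace,
# not the real transformers config class; explicit None checks matter because
# a token id of 0 is valid.
def _token_fallback_demo():
    from types import SimpleNamespace

    cfg = SimpleNamespace(decoder_start_token_id=None , pad_token_id=None , bos_token_id=50256 , eos_token_id=50256 )
    decoder_start = cfg.decoder_start_token_id if cfg.decoder_start_token_id is not None else cfg.bos_token_id
    pad = cfg.pad_token_id if cfg.pad_token_id is not None else cfg.eos_token_id
    return decoder_start , pad  # (50256, 50256)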
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    destination_vertex: int
    weight: int
class AdjacencyList :
    def __init__( self : Optional[Any] , size : int ) -> None:
        """simple docstring"""
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self : Any , vertex : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size ( self : str ) -> int:
        """simple docstring"""
        return self._size
    def add_edge ( self : Union[str, Any] , from_vertex : int , to_vertex : int , weight : int ) -> None:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path ( self : Tuple , start_vertex : int , finish_vertex : int ) -> int | None:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
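    # --- Added illustration (not part of the original file): 0-weight edges
    # go to the FRONT of the deque and 1-weight edges to the BACK, so vertices
    # settle in nondecreasing distance order -- Dijkstra-like results in
    # O(V + E).
    g = AdjacencyList(5 )
    g.add_edge(0 , 1 , 0 )
    g.add_edge(1 , 2 , 1 )
    g.add_edge(0 , 3 , 1 )
    g.add_edge(3 , 4 , 1 )
    g.add_edge(2 , 4 , 0 )
    print(g.get_shortest_path(0 , 4 ))  # 1  (path 0 -> 1 -> 2 -> 4, weights 0 + 1 + 0)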
| 22 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video (url : str ):
    '''simple docstring'''
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 22 |
'''simple docstring'''
from math import pi, sqrt
def gamma (num : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
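    # --- Added illustration (not part of the original file): the recurrence
    # gamma(x) = (x - 1) * gamma(x - 1) with gamma(0.5) = sqrt(pi) gives
    # gamma(2.5) = 1.5 * 0.5 * sqrt(pi) ~= 1.3293, matching math.gamma.
    import math
    assert math.isclose(gamma(2.5 ) , math.gamma(2.5 ) )
    assert gamma(5 ) == 24.0   # (5 - 1)! for an integer argument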
| 22 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance (lata : float , lona : float , latb : float , lonb : float ):
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lata ) ) )
    b_latb = atan((1 - flattening) * tan(radians(latb ) ) )
    # Compute central angle between the two points
    # via the haversine formula: sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lata , lona , latb , lonb ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lata + b_latb) / 2
    q_value = (b_lata - b_latb) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
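    # --- Added illustration (not part of the original file): San Francisco
    # to Yosemite. The spherical (haversine) estimate is roughly 254 km; the
    # Lambert correction above shifts it by a fraction of a percent on real
    # coordinates.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO , *YOSEMITE ))  # metres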
| 22 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
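if __name__ == "__main__":
    # --- Added illustration (not part of the original test file): the
    # assertion pattern used above, in isolation -- compare a 3x3 corner
    # slice of a generated image against stored reference values with an
    # absolute tolerance. Toy array instead of a real pipeline output.
    image = np.zeros((1, 512, 512, 3) )
    image[0, -3:, -3:, -1] = 0.04
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.full(9 , 0.04 )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2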
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length (fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ):
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        length += math.hypot(xb - xa , fxb - fxa )
        # Increment step
        xa = xb
        fxa = fxb
return length
if __name__ == "__main__":
    def f (x : float ):
        '''simple docstring'''
        return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
while i <= 100000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
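    # --- Added illustration (not part of the original file): accuracy check
    # on a curve with a closed form. y = x**1.5 from 0 to 4 has exact length
    # (8 / 27) * (10 * sqrt(10) - 1) ~= 9.0734.
    print(line_length(lambda x: x ** 1.5 , 0 , 4 , 1000 ))
    print((8 / 27) * (10 * math.sqrt(10 ) - 1 ))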
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer (text ):
    '''simple docstring'''
    def remove_articles(text ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact (a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em (predictions , references ):
    '''simple docstring'''
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
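if __name__ == "__main__":
    # --- Added illustration (not part of the original file): what the
    # SQuAD-style normalization above does, step by step -- lowercase, strip
    # punctuation, drop articles, squeeze whitespace.
    print(normalize_answer('''The  quick, brown fox!''' ))  # quick brown fox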
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
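# --- Hedged, illustrative sketch (not part of the original metric) ---
# The KEEP/DELETE arithmetic above is multiset algebra over n-gram counts;
# a minimal example of the Counter operations it relies on:
from collections import Counter

_src = Counter("about 95 species are".split())
_pred = Counter("about 95 you now".split())
assert (_src & _pred) == Counter({"about": 1, "95": 1})       # n-grams kept from the source
assert (_src - _pred) == Counter({"species": 1, "are": 1})    # n-grams deleted by the prediction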
def SARIsent(ssent : Union[str, Any] , csent : List[Any] , rsents : Optional[int] ):
    '''simple docstring'''
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    s3grams = []
    s4grams = []
    c2grams = []
    c3grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score , del1score , add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score , del2score , add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score , del3score , add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score , del4score , add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence : str , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    '''simple docstring'''
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
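# --- Hedged usage sketch ---
# sacremoses is the backend behind the "moses" branch above; this assumes
# sacremoses is installed and mirrors the call made in that branch.
import sacremoses

_tok = sacremoses.MosesTokenizer()
print(_tok.tokenize("Hello, world!", return_str=True, escape=False))  # "Hello , world !"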
def compute_sari(sources : int , predictions : int , references : Dict ):
    '''simple docstring'''
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def compute_sacrebleu(predictions : Dict , references : Tuple , smooth_method : List[str]="exp" , smooth_value : List[Any]=None , force : Optional[int]=False , lowercase : Union[str, Any]=False , use_effective_order : Optional[int]=False , ):
    '''simple docstring'''
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
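# --- Hedged usage sketch ---
# corpus_bleu, as called above, takes one list of hypotheses plus a transposed
# list of reference streams; assumes sacrebleu is installed.
import sacrebleu

_bleu = sacrebleu.corpus_bleu(
    ["About 95 you now get in ."],
    [["About 95 species are currently known ."]],
)
print(_bleu.score)  # a float between 0 and 100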
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self : int , sources : Optional[int] , predictions : Union[str, Any] , references : Any ) -> Dict:
        """simple docstring"""
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 22 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
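# --- Hedged sketch of the loss checked in the test above ---
# Per-token softmax cross-entropy from optax over one-hot targets, then the
# mean; shapes are illustrative and assume jax/optax are installed.
import jax
import jax.numpy as jnp
import optax

_logits = jnp.zeros((1, 4, 10))      # (batch, seq, vocab)
_labels = jnp.array([[1, 2, 3, 0]])  # (batch, seq)
_loss = optax.softmax_cross_entropy(_logits, jax.nn.one_hot(_labels, 10)).mean()
print(float(_loss))                  # log(10) for uniform logits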
| 22 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (images : Optional[int] ):
    '''simple docstring'''
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images : str ):
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
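# --- Hedged usage sketch ---
# How the two helpers above compose, assuming torch is installed: a batch in
# [-1, 1] is denormalized, moved to HWC numpy, then handed to numpy_to_pil.
import torch

_batch = torch.rand(2, 3, 8, 8) * 2 - 1   # fake diffusion output in [-1, 1]
_batch = (_batch / 2 + 0.5).clamp(0, 1)
_array = _batch.cpu().permute(0, 2, 3, 1).float().numpy()
# passing `_array` to numpy_to_pil above yields two 8x8 RGB PIL images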
| 22 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args : Optional[int] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class A ( _a ):
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : ArgumentParser ) -> Tuple:
"""simple docstring"""
_a = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase_ , help='''Name of the model to download''' )
        download_parser.set_defaults(func=download_command_factory )
def __init__( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : bool , lowerCAmelCase_ : bool ) -> Tuple:
"""simple docstring"""
_a = model
_a = cache
_a = force
_a = trust_remote_code
def __lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
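# --- Hedged usage sketch ---
# This command is normally reached through the `transformers-cli` entry point,
# e.g. (model name illustrative):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased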
| 22 |
'''simple docstring'''
import requests
def send_slack_message(message_body : str , slack_channel_url : str ):
    '''simple docstring'''
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_channel_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 1 |
'''simple docstring'''
def search(list_data : list , key : int , left : int = 0 , right : int = 0 ):
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
    def post_process_semantic_segmentation( self : Union[str, Any] , outputs : Optional[int] , target_sizes : List[Tuple] = None ) -> Any:
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
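# --- Hedged sketch of the preprocessing steps the class above implements ---
# Uses the functional transforms imported at the top of this file; the random
# image and the channel-first layout are illustrative assumptions.
import numpy as np

_image = np.random.randint(0, 256, (3, 32, 32), dtype=np.uint8)
_image = to_numpy_array(_image)                                  # ensure ndarray
_image = rescale(_image, scale=1 / 255)                          # do_rescale step
_image = normalize(_image, mean=IMAGENET_STANDARD_MEAN, std=IMAGENET_STANDARD_STD)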
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence : list[Any] ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence : list[Any] , current_subsequence : list[Any] , index : int ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
_snake_case : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 22 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
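# --- Hedged sketch of the generator-to-dataset pattern used in get_tfds above ---
# Dtypes and shapes follow the reconstructed calls above but should be treated
# as illustrative; assumes TensorFlow is installed.
def _gen():
    yield {"input_ids": [101, 102]}, 0

_demo_ds = tf.data.Dataset.from_generator(
    _gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)
print(next(iter(_demo_ds)))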
_snake_case : str = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'} )
    train_file: str = field(default=None , metadata={'help': 'The path of the training file'} )
    dev_file: Optional[str] = field(default=None , metadata={'help': 'The path of the development file'} )
    test_file: Optional[str] = field(default=None , metadata={'help': 'The path of the test file'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast: bool = field(default=False , metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        f'16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid ) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f' {key} = {value}' )
                writer.write(f'{key} = {value}\n' )
        results.update(result )
    return results
if __name__ == "__main__":
main()
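    # --- Hedged invocation sketch (file and column values illustrative) ---
    #   python run_tf_text_classification.py \
    #     --model_name_or_path bert-base-uncased \
    #     --train_file train.csv --dev_file dev.csv \
    #     --label_column_id 0 \
    #     --output_dir /tmp/text-classification \
    #     --do_train --do_eval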
| 22 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
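# --- Hedged usage sketch ---
# What the suite above exercises, outside the test harness (downloads a real
# checkpoint, so illustrative only):
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   batch = tok(["A long paragraph for summarization."], return_tensors="pt")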
| 22 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 22 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_snake_case : Dict = get_logger(__name__)
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[str] = None ) -> str:
"""simple docstring"""
_a = (
os.path.join(lowerCAmelCase_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_a = Extractor
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_a = os.path.abspath(lowerCAmelCase_ )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(lowerCAmelCase_ ) and not (os.path.isdir(lowerCAmelCase_ ) and os.listdir(lowerCAmelCase_ ))
)
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ) -> str:
"""simple docstring"""
_a = self.extractor.infer_extractor_format(lowerCAmelCase_ )
if not extractor_format:
return input_path
_a = self._get_output_path(lowerCAmelCase_ )
if self._do_extract(lowerCAmelCase_ , lowerCAmelCase_ ):
self.extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return output_path
class A ( _a ):
@classmethod
@abstractmethod
def __lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase_ : Union[Path, str] , **lowerCAmelCase_ : int ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
...
class A ( _a ,_a ):
lowercase_ = []
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
with open(lowerCAmelCase_ , '''rb''' ) as f:
return f.read(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : Dict , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
_a = max(len(lowerCAmelCase_ ) for cls_magic_number in cls.magic_numbers )
try:
_a = cls.read_magic_number(lowerCAmelCase_ , lowerCAmelCase_ )
except OSError:
return False
return any(magic_number.startswith(lowerCAmelCase_ ) for cls_magic_number in cls.magic_numbers )
class A ( _a ):
@classmethod
def __lowerCAmelCase ( cls : int , lowerCAmelCase_ : Union[Path, str] , **lowerCAmelCase_ : Union[str, Any] ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(lowerCAmelCase_ )
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> Dict:
"""simple docstring"""
def resolved(lowerCAmelCase_ : str ) -> str:
return os.path.realpath(os.path.abspath(lowerCAmelCase_ ) )
def badpath(lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ).startswith(lowerCAmelCase_ )
def badlink(lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_a = resolved(os.path.join(lowerCAmelCase_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCAmelCase_ )
_a = resolved(lowerCAmelCase_ )
for finfo in members:
if badpath(finfo.name , lowerCAmelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(lowerCAmelCase_ , lowerCAmelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(lowerCAmelCase_ , lowerCAmelCase_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_a = tarfile.open(lowerCAmelCase_ )
tar_file.extractall(lowerCAmelCase_ , members=TarExtractor.safemembers(lowerCAmelCase_ , lowerCAmelCase_ ) )
tar_file.close()
class A ( _a ):
lowercase_ = [b'\x1F\x8B']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(lowerCAmelCase_ , '''rb''' ) as gzip_file:
with open(lowerCAmelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ )
class A ( _a ):
lowercase_ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(lowerCAmelCase_ , magic_number=lowerCAmelCase_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCAmelCase_ , '''rb''' ) as fp:
_a = _EndRecData(lowerCAmelCase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_a = fp.read(lowerCAmelCase_ ) # CD is where we expect it to be
if len(lowerCAmelCase_ ) == sizeCentralDir:
_a = struct.unpack(lowerCAmelCase_ , lowerCAmelCase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
with zipfile.ZipFile(lowerCAmelCase_ , '''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase_ )
zip_file.close()
class A ( _a ):
lowercase_ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(lowerCAmelCase_ ) as compressed_file:
with open(lowerCAmelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ )
class A ( _a ):
lowercase_ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_a = rarfile.RarFile(lowerCAmelCase_ )
rf.extractall(lowerCAmelCase_ )
rf.close()
class A ( _a ):
lowercase_ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
_a = zstd.ZstdDecompressor()
with open(lowerCAmelCase_ , '''rb''' ) as ifh, open(lowerCAmelCase_ , '''wb''' ) as ofh:
dctx.copy_stream(lowerCAmelCase_ , lowerCAmelCase_ )
class A ( _a ):
lowercase_ = [b'\x42\x5A\x68']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
        with bz2.open(lowerCAmelCase_ , '''rb''' ) as compressed_file:
with open(lowerCAmelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ )
class A ( _a ):
lowercase_ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
        with py7zr.SevenZipFile(lowerCAmelCase_ , '''r''' ) as archive:
archive.extractall(lowerCAmelCase_ )
class A ( _a ):
lowercase_ = [b'\x04\x22\x4D\x18']
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(lowerCAmelCase_ , '''rb''' ) as compressed_file:
with open(lowerCAmelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ )
class A :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowercase_ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCAmelCase ( cls : List[Any] ) -> Optional[int]:
"""simple docstring"""
return max(
len(lowerCAmelCase_ )
for extractor in cls.extractors.values()
if issubclass(lowerCAmelCase_ , lowerCAmelCase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : int ) -> List[Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase_ , magic_number_length=lowerCAmelCase_ )
except OSError:
return b""
@classmethod
def __lowerCAmelCase ( cls : int , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=lowerCAmelCase_ , )
_a = cls.infer_extractor_format(lowerCAmelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase_ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
_a = cls._get_magic_number_max_length()
_a = cls._read_magic_number(lowerCAmelCase_ , lowerCAmelCase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCAmelCase_ , magic_number=lowerCAmelCase_ ):
return extractor_format
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Union[Path, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(lowerCAmelCase_ ) , exist_ok=lowerCAmelCase_ )
# Prevent parallel extractions
_a = str(Path(lowerCAmelCase_ ).with_suffix('''.lock''' ) )
with FileLock(lowerCAmelCase_ ):
shutil.rmtree(lowerCAmelCase_ , ignore_errors=lowerCAmelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=lowerCAmelCase_ , )
_a = extractor if extractor != '''deprecated''' else extractor_format
else:
_a = cls.extractors[extractor_format]
return extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=lowerCAmelCase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCAmelCase_ ):
return extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ )
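# --- Hedged sketch of the magic-number sniffing used throughout this module ---
# Each extractor compares the first bytes of a file against known signatures;
# a self-contained example for the gzip signature listed above:
def _looks_like_gzip(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(2) == b"\x1f\x8b"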
| 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def snake_case_ (split_dict : SplitDict ):
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # "dataset_name" is kept in the asdict output for backward compatibility, even though it is deprecated
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 1 |
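A quick interactive sketch of the round trip the first test exercises (assuming a `datasets` version where the private `_to_yaml_list` / `_from_yaml_list` helpers exist, as the test itself does):

from datasets.splits import SplitDict, SplitInfo

split_dict = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = split_dict._to_yaml_list()           # e.g. [{"name": "train", "num_bytes": 1337, "num_examples": 42}]
reloaded = SplitDict._from_yaml_list(yaml_list)  # rebuilds an equivalent SplitDict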
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
        cluster = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 22 |
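The command assembly above leans on `shlex.quote` so forwarded arguments survive the remote shell; a small self-contained illustration:

import shlex

unknown = ["--prompt", "hello world; rm -rf /", "--length", "20"]
cmd = "python run_generation.py " + " ".join(shlex.quote(arg) for arg in unknown)
print(cmd)
# python run_generation.py --prompt 'hello world; rm -rf /' --length 20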
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 22 | 1 |
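The tests above exercise a checker for `# Copied from ...` markers. A rough sketch of the core parsing idea (the real `check_copies` utility is more involved; this regex and the example class are simplified assumptions):

import re

COPY_PATTERN = re.compile(r"^#\s*Copied from\s+(\S+)(\s+with\s+(\S+)->(\S+))?", re.MULTILINE)

source = """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
class TestSchedulerOutput:
    pass
"""

match = COPY_PATTERN.search(source)
if match:
    target, _, old, new = match.groups()
    print(target)          # diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput
    print(old, "->", new)  # DDPM -> Test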
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 22 |
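Instantiating the config works like any other `transformers` config; a minimal usage sketch:

from transformers import BertConfig

config = BertConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
print(config.vocab_size)   # 30522 (the default)
print(config.hidden_size)  # 512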
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 22 | 1 |
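The two sequence-pair helpers implement the standard BERT-style packing. A pure-Python sketch of what they compute for a pair of sequences (the token ids are illustrative):

CLS, SEP = 101, 102          # conventional BERT ids for [CLS] and [SEP]
token_ids_0 = [7592, 2088]   # first sequence
token_ids_1 = [2129, 2024]   # second sequence

input_ids = [CLS] + token_ids_0 + [SEP] + token_ids_1 + [SEP]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
print(input_ids)       # [101, 7592, 2088, 102, 2129, 2024, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1]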
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher command-line arguments."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 22 |
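The launcher's trick is importing the training script as a module and rewriting `sys.argv` before `xmp.spawn` forks the processes. The same pattern in isolation (with a hypothetical `train.py`):

import importlib
import sys
from pathlib import Path

script = Path("train.py")                   # hypothetical training script
sys.path.append(str(script.parent.resolve()))
mod = importlib.import_module(script.stem)  # imports train.py as module `train`

# Make the script believe it was invoked as `train.py --tpu_num_cores 8`
sys.argv = [str(script), "--tpu_num_cores", "8"]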
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class A(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 22 | 1 |
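The `preprocess` pipeline reduces to resize -> center crop -> rescale -> normalize over numpy arrays. A compact sketch of the last two steps, which are plain arithmetic (the mean/std values are the ImageNet defaults used above):

import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)

# rescale: map [0, 255] pixel values into [0, 1]
image = image * (1 / 255)

# normalize: per-channel standardization with ImageNet statistics
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = (image - mean) / std
print(image.shape)  # (224, 224, 3)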
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Find the index of x in the sorted list arr, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F'''Number {x} is at index {res}''')
| 22 |
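Jump search assumes a sorted array and makes O(sqrt(n)) jumps; a quick sanity check of the function above:

sorted_arr = [0, 1, 2, 4, 8, 16, 32, 64, 128]
print(jump_search(sorted_arr, 16))   # 5  (found at index 5)
print(jump_search(sorted_arr, 100))  # -1 (absent)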
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 | 1 |
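This init follows the `_LazyModule` pattern: declare importable names up front, defer the real imports until first attribute access. A toy version of the mechanism using a module-level `__getattr__` (PEP 562); the mapping below is illustrative:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):  # called only when `name` isn't found by normal lookup
    if name in _name_to_module:
        module = importlib.import_module(_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")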
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
lowercase_ = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 | 1 |
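The `_get_variance` values asserted above follow from the DDPM posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) under the linear beta schedule configured in the test. A numpy sketch of that computation (illustrative; the real scheduler also clamps tiny values and switches on `variance_type`):

import numpy as np

num_train_timesteps = 1000
betas = np.linspace(1e-4, 0.02, num_train_timesteps)
alphas_cumprod = np.cumprod(1.0 - betas)


def posterior_variance(t: int) -> float:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - alpha_prod_t_prev) / (1.0 - alpha_prod_t)


print(posterior_variance(0))    # 0.0 by construction
print(posterior_variance(999))  # close to beta_999 = 0.02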
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Union[str, Any]=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=99 , lowerCAmelCase_ : Any=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Union[str, Any]=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : int=50 , lowerCAmelCase_ : Optional[Any]=0.0_2 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : str=None , ) -> Optional[Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = use_labels
_a = scope
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[Any] , ) -> Dict:
"""simple docstring"""
_a = BertGenerationEncoder(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_a = True
_a = BertGenerationEncoder(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[int] , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = BertGenerationDecoder(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).eval()
# first forward pass
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['''hidden_states'''][0]
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
_a = BertGenerationDecoder(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a , _a , _a , _a = self.prepare_config_and_inputs()
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
lowercase_ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase_ = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase_ = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_a = BertGenerationEncoderTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = '''bert'''
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_a = model(lowerCAmelCase_ )[0]
_a = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , lowerCAmelCase_ )
_a = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_a = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_a = model(lowerCAmelCase_ )[0]
_a = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , lowerCAmelCase_ )
_a = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
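The two reported metrics are simple averages over the residuals; a tiny numpy sketch of what `mean_absolute_error` and `mean_squared_error` compute:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

mae = np.mean(np.abs(y_true - y_pred))    # 0.5
mse = np.mean((y_true - y_pred) ** 2)     # 0.375
print(mae, mse)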
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 22 |
'''simple docstring'''
import qiskit
def half_adder(bita: int, bitb: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder and return the measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 22 | 1 |
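Sweeping all four input pairs recovers the classical half-adder truth table; qiskit reports counts keyed by the measured bitstring `<carry><sum>` (a sketch, assuming qiskit with the Aer simulator is installed):

for bita in (0, 1):
    for bitb in (0, 1):
        counts = half_adder(bita, bitb)
        dominant = max(counts, key=counts.get)  # e.g. '10' for inputs 1, 1
        print(f"{bita} + {bitb} -> sum={dominant[1]} carry={dominant[0]}")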
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Convert a 32-char bit string from big-endian to little-endian, byte-wise."""
    if len(string_aa) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 little-endian hex digits."""
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i, '''08x''' )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string), '''064b''' ).encode('''utf-8''' )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0, len(bit_string), 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT of a 32-bit int."""
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i, '''032b''' )
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2 )
def sum_aa(a: int, b: int) -> int:
    """Add two 32-bit ints, wrapping around 2**32."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate a 32-bit int left by `shift` bits."""
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char hex MD5 digest of `message`."""
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states
    aa = 0X67452301
    ba = 0Xefcdab89
    ca = 0X98badcfe
    da = 0X10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i] ) )

        # Add hashed chunk to running total
        aa = sum_aa(aa, a )
        ba = sum_aa(ba, b )
        ca = sum_aa(ca, c )
        da = sum_aa(da, d )

    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
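A quick cross-check of the pure-Python digest against the standard library; if the reconstruction above is faithful, the two hex strings agree:

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
print(md5_me(message).decode())          # pure-Python digest
print(hashlib.md5(message).hexdigest())  # 9e107d9d372bb6826bd81d3542a419d6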
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for `value` using the given denominations."""
    total_value = int(value )

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())

        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
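# Worked example (greedy, assumes denominations are sorted ascending):
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].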
| 22 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , ) -> None:
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ) -> dict:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''clusters''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_to_json_string( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_to_json_file( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''image_processor.json''' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
    def test_init_without_params( self ) -> None:
"""simple docstring"""
pass
def prepare_images ():
    '''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image_a = Image.open(dataset[4]['''file'''] )
    image_b = Image.open(dataset[5]['''file'''] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
    def test_image( self ) -> None:
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        expected_ids = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        expected_ids = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 22 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : Any = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    model_type = 'segformer'
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_56 , semantic_loss_ignore_index=2_55 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''' , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 12
| 22 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ) -> None:
        """simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 22 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class A ( unittest.TestCase ):
    def test_sorted( self ) -> None:
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 1_00
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 2_10 )
    def test_negative_max_weight( self ) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value( self ) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value( self ) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight( self ) -> None:
        """simple docstring"""
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length( self ) -> None:
        """simple docstring"""
        self.assertRaisesRegex(
            IndexError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 22 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def decimal_to_bits (x , bits=BITS ):
    '''simple docstring'''
    # Expects an image tensor in [0, 1]; returns its bit representation in {-1, 1}.
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , '''b c d h w -> b (c d) h w''' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal (x , bits=BITS ):
    '''simple docstring'''
    # Expects a bit tensor (values around {-1, 1}); returns an image tensor in [0, 1].
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
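# Quick sanity check (a sketch, not part of the original file): converting an
# image tensor to bits and back should round-trip up to 8-bit quantization.
#
#     img = torch.rand(1, 3, 8, 8)
#     assert torch.allclose(bits_to_decimal(decimal_to_bits(img)),
#                           (img * 255).int() / 255, atol=1e-6)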
def ddim_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else '''cpu'''
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ) -> None:
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        # The patched step functions read `self.bit_scale` with `self` being the
        # scheduler, so mirror the value there and bind the chosen step function.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        ).__get__(scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , height : Optional[int] = 2_56 , width : Optional[int] = 2_56 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
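# A minimal instantiation sketch (checkpoint/model names are placeholders; the
# scheduler choice and bit scale follow the class above):
#
#     unet = UNetaDConditionModel(...)      # a UNet trained on bit-encoded images
#     scheduler = DDIMScheduler()
#     pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#     images = pipe(height=64, width=64, num_inference_steps=10).images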
| 22 | 1 |
'''simple docstring'''
import copy
import re
class A :
lowercase_ = 'hp'
lowercase_ = {}
lowercase_ = None
@classmethod
def __lowerCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Any:
"""simple docstring"""
_a = prefix
_a = defaults
cls.build_naming_info()
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if len(lowerCAmelCase_ ) == 0:
return ""
_a = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCAmelCase_ ) + 1 ):
_a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase_ : Optional[int] ):
_a = ''''''
while integer != 0:
_a = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
_a = 0
while True:
_a = word + '''#''' + int_to_alphabetic(lowerCAmelCase_ )
if sword in info["reverse_short_word"]:
continue
else:
_a = sword
break
_a = short_word
_a = word
return short_word
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> int:
"""simple docstring"""
_a = param_name.split('''_''' )
_a = [TrialShortNamer.shortname_for_word(lowerCAmelCase_ , lowerCAmelCase_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_a = ['''''', '''_''']
for separator in separators:
_a = separator.join(lowerCAmelCase_ )
if shortname not in info["reverse_short_param"]:
_a = shortname
_a = param_name
return shortname
return param_name
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
_a = TrialShortNamer.shortname_for_key(lowerCAmelCase_ , lowerCAmelCase_ )
_a = short_name
_a = param_name
@classmethod
def __lowerCAmelCase ( cls : Dict ) -> Optional[int]:
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
_a = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
_a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase_ , lowerCAmelCase_ )
_a = info
@classmethod
def __lowerCAmelCase ( cls : int , lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
_a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_a = cls.NAMING_INFO['''short_param'''][k]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = 1 if v else 0
_a = '''''' if isinstance(lowerCAmelCase_ , (int, float) ) else '''-'''
_a = F'{key}{sep}{v}'
name.append(lowerCAmelCase_ )
return "_".join(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
_a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_a = []
else:
_a = repr.split('''_''' )
_a = {}
for value in values:
if "-" in value:
_a , _a = value.split('''-''' )
else:
_a = re.sub('''[0-9.]''' , '''''' , lowerCAmelCase_ )
_a = float(re.sub('''[^0-9.]''' , '''''' , lowerCAmelCase_ ) )
_a = cls.NAMING_INFO['''reverse_short_param'''][p_k]
_a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_a = cls.DEFAULTS[k]
return parameters
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
    model_type = 'roformer'
    def __init__( self , vocab_size=5_00_00 , embedding_size=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=15_36 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 22 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__( self , size : int ) -> None:
        """simple docstring"""
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ) -> int:
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex : int , to_vertex : int , weight : int ) -> None:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex : int , finish_vertex : int ) -> int | None:
        """simple docstring"""
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
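# Example usage (a sketch; `get_shortest_path` is the method name chosen above):
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)   # free edge
#     g.add_edge(1, 2, 1)   # unit-cost edge
#     assert g.get_shortest_path(0, 2) == 1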
| 22 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _a ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **lowerCAmelCase_ ) -> dict:
        """simple docstring"""
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
    def test_variance( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 |
'''simple docstring'''
from math import pi, sqrt
def gamma (num : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
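# Worked example: gamma(3.5) = 2.5 * 1.5 * gamma(1.5)
#                            = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234,
# which matches math.gamma(3.5) from the standard library.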
| 22 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_ (state_dict ):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb (emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk (checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_aa=False ):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
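# Example invocation (script name and paths are placeholders):
#
#     python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#         --hf_config facebook/mbart-large-cc25 --finetuned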
| 22 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown( self ) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2( self ) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas( self ) -> None:
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ) -> bool:
        """simple docstring"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args , **kwargs ) -> None:
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger (name : str , log_level : str = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
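# Typical usage (a sketch; mirrors how the helper above is meant to be called):
#
#     logger = get_logger(__name__)
#     logger.info("Logged only on the main process by default")
#     logger.info("On every process, in order", main_process_only=False, in_order=True)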
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer (s ):
    '''simple docstring'''
    def remove_articles(text ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact (a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em (predictions , references ):
    '''simple docstring'''
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram (sgrams , cgrams , rgramslist , numref ):
    '''simple docstring'''
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent (ssent , csent , rsents ):
    '''simple docstring'''
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    s3grams = []
    s4grams = []
    c2grams = []
    c3grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize (sentence : str , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ):
    '''simple docstring'''
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari (sources , predictions , references ):
    '''simple docstring'''
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(sources )
    return 100 * sari_score
def compute_sacrebleu (predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    '''simple docstring'''
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , sources , predictions , references ) -> dict:
        """simple docstring"""
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort (array : list[int] ):
    '''simple docstring'''
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the number of holes needed
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Fill the holes.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Rebuild the array from the holes.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
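# Pigeonhole sort runs in O(n + range) time and O(range) extra space, so it
# only pays off when the value range is small, e.g.:
#
#     pigeon_sort([8, 3, 2, 7, 4, 6, 8])  # -> [2, 3, 4, 6, 7, 8, 8]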
| 22 |
"""
PIL helpers for converting model outputs (torch tensors / numpy arrays) to PIL images.
"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] (NCHW) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] (NHWC) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
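# Illustrative usage sketch (not part of the original module): round-tripping a random
# torch batch through the helpers above. The shape and [-1, 1] range are assumptions
# matching the NCHW convention pt_to_pil expects.
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # NCHW batch in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    print([im.size for im in pil_images])  # [(64, 64), (64, 64)]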
| 22 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 22 |
"""Post a message to a Slack channel through an incoming-webhook URL."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1024,
    'microsoft/speecht5_tts': 1024,
    'microsoft/speecht5_vc': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
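# Illustrative usage sketch (not from the original module): encoding and decoding a
# short utterance with the tokenizer above. Downloading the "microsoft/speecht5_tts"
# checkpoint (listed in PRETRAINED_VOCAB_FILES_MAP) is assumed to be possible.
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.decode(ids, skip_special_tokens=True))  # "hello world"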
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# Class name restored on the assumption that these defaults (shortest edge 256,
# 224x224 center crop, ImageNet-standard normalization, semantic-segmentation
# post-processing) belong to the MobileNetV2 image processor.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
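# Illustrative usage sketch (not part of the original module). The class name above
# was restored under an assumption (see the comment on the class), and the array
# shape below is made up for demonstration.
#
#   processor = MobileNetV2ImageProcessor()
#   dummy_image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
#   batch = processor.preprocess(dummy_image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)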
| 22 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of the VAE decoding method, carrying the decoded image batch."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
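# Illustrative sketch (not in the original file): sampling from the posterior that a
# VAE encoder parameterizes. Shapes are assumptions chosen only for demonstration.
#
#   moments = torch.randn(1, 8, 16, 16)  # concatenated mean and logvar, 4 channels each
#   posterior = DiagonalGaussianDistribution(moments)
#   z = posterior.sample()
#   print(z.shape, posterior.kl().shape)  # torch.Size([1, 4, 16, 16]) torch.Size([1])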
| 22 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
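# Illustrative call sketch (not part of the original script); the file names and the
# tokenizer checkpoint below are placeholders, not values from the source.
#
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv",
#       eval_file="dev.csv",
#       test_file="test.csv",
#       tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),
#       label_column_id=0,
#       max_seq_length=128,
#   )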
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
| 22 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests were collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
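# Illustrative sketch (not part of the original conftest): how the IGNORE_RESULT
# doctest flag registered above is meant to be used. The example object is hypothetical.
#
#   >>> build_pipeline()  # doctest: +IGNORE_RESULT
#   <Pipeline object at 0x7f...>
#
# With the patched CustomOutputChecker, any output from such an example is accepted.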
| 22 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 22 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed, wrapped to `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "

        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # debug toggle: enable to skip the actual run and return fake metrics instead
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    '''simple docstring'''
    metrics = []
    results = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        single_run_target_metric = single_run_metrics[target_metric_key]
        if not math.isnan(single_run_target_metric ):
            metrics.append(single_run_metrics )
            results.append(single_run_target_metric )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'{outcome} {mean_target}'
        if len(results ) > 1:
            results_str += f' {tuple(round(x , 2 ) for x in results )}'
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    '''simple docstring'''
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(f'and this script\'s output is also piped into {report_fn}' )
    sys.stdout = Tee(report_fn )
    print(f'\n*** Running {len(variations )} benchmarks:' )
    print(f'Base command: {" ".join(base_cmd )}' )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 22 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def snake_case_ (UpperCamelCase : SplitDict ):
'''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A ( _a ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
class A ( _a ,_a ):
lowercase_ = 2
@register_to_config
def __init__( self : Dict , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1_00 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Tuple:
"""simple docstring"""
_a = sigma_max
# setable values
_a = None
_a = None
_a = None # sigma(t_i)
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None ) -> List[str]:
"""simple docstring"""
_a = num_inference_steps
_a = np.arange(0 , self.num_inference_steps )[::-1].copy()
_a = torch.from_numpy(lowerCAmelCase_ ).to(lowerCAmelCase_ )
_a = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
        _a = torch.tensor(lowerCAmelCase_ , dtype=torch.float32 , device=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
_a = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_a = 0
# sample eps ~ N(0, S_noise^2 * I)
_a = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase_ ).to(sample.device )
_a = sigma + gamma * sigma
_a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
_a = sample_hat + sigma_hat * model_output
_a = (sample_hat - pred_original_sample) / sigma_hat
_a = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
"""simple docstring"""
_a = sample_prev + sigma_prev * model_output
_a = (sample_prev - pred_original_sample) / sigma_prev
_a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
| 22 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]:
"""simple docstring"""
_a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        _a = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
_a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
_a = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
_a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
| 22 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ (UpperCamelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
_a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase , UpperCamelCase )
# Predict target for test data
_a = xgb.predict(UpperCamelCase )
    _a = predictions.reshape(len(predictions ) , 1 )
return predictions
def snake_case_ ():
'''simple docstring'''
_a = fetch_california_housing()
_a , _a = data_handling(UpperCamelCase )
_a , _a , _a , _a = train_test_split(
UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
_a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Error printing
print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' )
print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Union[str, Any] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = SqueezeBertTokenizer
def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_snake_case : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message : str ):
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x : tuple ):
    '''simple docstring'''
    return x[0]
def get_frequency_order(message : str ):
    '''simple docstring'''
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''''''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score(message : str ):
    '''simple docstring'''
    freq_order = get_frequency_order(message )
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address : str ):
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case : List[str] = 16
_snake_case : int = 32
def snake_case_ (UpperCamelCase : Accelerator , UpperCamelCase : DatasetDict , UpperCamelCase : List[int] , UpperCamelCase : List[int] , UpperCamelCase : int = 16 ):
'''simple docstring'''
_a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_a = DatasetDict(
{
'''train''': dataset['''train'''].select(UpperCamelCase ),
'''validation''': dataset['''train'''].select(UpperCamelCase ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(UpperCamelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a = datasets.map(
UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a = 16
elif accelerator.mixed_precision != "no":
_a = 8
else:
_a = None
return tokenizer.pad(
UpperCamelCase , padding='''longest''' , max_length=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
_a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
_a = DataLoader(
tokenized_datasets['''test'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = []
# Download the dataset
_a = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
_a = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config['''lr''']
_a = int(config['''num_epochs'''] )
_a = int(config['''seed'''] )
_a = int(config['''batch_size'''] )
_a = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_a = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a = batch_size // MAX_GPU_BATCH_SIZE
_a = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase )
# New Code #
# Create our folds:
_a = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
_a = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(UpperCamelCase ):
_a , _a , _a = get_fold_dataloaders(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a = model.to(accelerator.device )
# Instantiate optimizer
_a = AdamW(params=model.parameters() , lr=UpperCamelCase )
# Instantiate scheduler
_a = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a = accelerator.prepare(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Now we train the model
for epoch in range(UpperCamelCase ):
model.train()
for step, batch in enumerate(UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a = model(**UpperCamelCase )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**UpperCamelCase )
_a = outputs.logits.argmax(dim=-1 )
_a , _a = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase , references=UpperCamelCase , )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , UpperCamelCase )
# New Code #
# We also run predictions on the test set at the very end
_a = []
for step, batch in enumerate(UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**UpperCamelCase )
_a = outputs.logits
_a , _a = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(UpperCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_a = torch.cat(UpperCamelCase , dim=0 )
_a = torch.stack(UpperCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_a = metric.compute(predictions=UpperCamelCase , references=UpperCamelCase )
accelerator.print('''Average test metrics from all folds:''' , UpperCamelCase )
def snake_case_ ():
'''simple docstring'''
_a = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCamelCase , default=UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=UpperCamelCase , default=3 , help='''The number of splits to perform across the dataset''' )
_a = parser.parse_args()
_a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
main()
| 22 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _a ):
lowercase_ = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowerCAmelCase_ , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case_ ():
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().__init__()
_a = nn.Linear(3 , 4 )
        _a = nn.BatchNorm1d(4 )
_a = nn.Linear(4 , 5 )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase_ ) ) )
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCAmelCase_ : str ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCAmelCase_ , [1_28, 64, 32, 16, 8] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_a = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_a , _a = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCAmelCase_ , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCAmelCase_ : Tuple ):
pass
with self.assertRaises(lowerCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCAmelCase_ : Union[str, Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowerCAmelCase_ ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCAmelCase_ : Any ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCAmelCase_ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_a = torch.cuda.memory_allocated()
_a = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCAmelCase_ )
_a = release_memory(lowerCAmelCase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCAmelCase_ )
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ (UpperCamelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
_a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase , UpperCamelCase )
# Predict target for test data
_a = xgb.predict(UpperCamelCase )
    _a = predictions.reshape(len(predictions ) , 1 )
return predictions
def snake_case_ ():
'''simple docstring'''
_a = fetch_california_housing()
_a , _a = data_handling(UpperCamelCase )
_a , _a , _a , _a = train_test_split(
UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
_a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Error printing
print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' )
print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_a = tempfile.mkdtemp()
_a = BlipImageProcessor()
_a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
_a = BlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : List[Any] , **lowerCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def __lowerCAmelCase ( self : int , **lowerCAmelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
        _a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
_a = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = self.prepare_image_inputs()
_a = image_processor(lowerCAmelCase_ , return_tensors='''np''' )
_a = processor(images=lowerCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = processor(text=lowerCAmelCase_ )
_a = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(lowerCAmelCase_ )
_a = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 22 |
'''simple docstring'''
import qiskit
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = qiskit.Aer.get_backend('''aer_simulator''' )
_a = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
_a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Tuple = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
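# Usage sketch (hedged): assuming qiskit is installed and the original name
# `half_adder` from the call above, the full truth table can be enumerated.
# Count keys read "<carry><sum>", so half_adder(1, 1) concentrates its 1000
# shots on "10" (sum = 0, carry = 1).
#
#     for bit_a in (0, 1):
#         for bit_b in (0, 1):
#             print(bit_a, bit_b, half_adder(bit_a, bit_b))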
| 22 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : List[str] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''08x''' )[-8:]
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = B''''''
for char in message:
bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCamelCase ) , 512 ):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase , 2 )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
return (a + b) % 2**32
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = preprocess(UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67452301
_a = 0Xefcdab89
_a = 0X98badcfe
_a = 0X10325476
    _a = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # Round 1: f = (b & c) | (not_32(b) & d) # Alternate definition for f
                _a = d ^ (b & (c ^ d))
                _a = i
            elif i <= 31:
                # Round 2: f = (d & b) | (not_32(d) & c) # Alternate definition for f
                _a = c ^ (d & (b ^ c))
                _a = (5 * i + 1) % 16
            elif i <= 47:
                # Round 3: parity of b, c and d
                _a = b ^ c ^ d
                _a = (3 * i + 5) % 16
            else:
                # Round 4: f = c ^ (b | not_32(d))
                _a = c ^ (b | not_aa(UpperCamelCase ))
                _a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
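# Sanity-check sketch (hedged): assuming the digest function keeps its original
# name `md5_me` and its renamed helpers resolve, the output should match hashlib:
#
#     import hashlib
#     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")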
| 22 | 1 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def snake_case_ (UpperCamelCase : str = "isbn/0140328726" ):
'''simple docstring'''
_a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
_a = f'{olid} is not a valid Open Library olid'
raise ValueError(UpperCamelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def snake_case_ (UpperCamelCase : dict ):
'''simple docstring'''
_a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
_a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
_a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(UpperCamelCase , UpperCamelCase ):
_a = ''', '''.join(UpperCamelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_snake_case : str = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
_snake_case : str = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print('\n'.join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
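# End-to-end usage sketch (hedged; requires network access, and the names
# follow the call sites above):
#
#     book_data = get_openlibrary_data("isbn/0140328726")  # raw Open Library JSON
#     summary = summarize_book(book_data)  # {'Title': ..., 'Authors': ..., ...}
#     print(summary["Title"])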
| 22 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]:
"""simple docstring"""
_a = size if size is not None else {'''height''': 18, '''width''': 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( _a ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = ImageGPTImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ():
'''simple docstring'''
_a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
_a = Image.open(dataset[4]['''file'''] )
_a = Image.open(dataset[5]['''file'''] )
_a = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_a = prepare_images()
# test non-batched
_a = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
_a = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
_a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
_a = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
| 22 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
_a = DetaConfig(
backbone_config=UpperCamelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=UpperCamelCase , with_box_refine=UpperCamelCase , two_stage=UpperCamelCase , )
# set labels
_a = '''huggingface/label-files'''
if "o365" in model_name:
_a = 366
_a = '''object365-id2label.json'''
else:
_a = 91
_a = '''coco-detection-id2label.json'''
_a = num_labels
_a = json.load(open(cached_download(hf_hub_url(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
_a = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config
def snake_case_ (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = dct.pop(UpperCamelCase )
_a = val
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
_a = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
_a = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:dim, :]
_a = in_proj_bias[: dim]
_a = in_proj_weight[
dim : dim * 2, :
]
_a = in_proj_bias[
dim : dim * 2
]
_a = in_proj_weight[
-dim :, :
]
_a = in_proj_bias[-dim :]
# fmt: on
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Dict ):
'''simple docstring'''
_a = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_a = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_a = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:hidden_size, :]
_a = in_proj_bias[:hidden_size]
_a = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_a = in_proj_bias[hidden_size : hidden_size * 2]
_a = in_proj_weight[-hidden_size:, :]
_a = in_proj_bias[-hidden_size:]
def snake_case_ ():
'''simple docstring'''
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = get_deta_config(UpperCamelCase )
# load original state dict
if model_name == "deta-swin-large":
_a = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
_a = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
_a = torch.load(UpperCamelCase , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(UpperCamelCase , param.shape )
# rename keys
_a = create_rename_keys(UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_swin_q_k_v(UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase , UpperCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_a = state_dict.pop(UpperCamelCase )
_a = val
if "input_proj" in key:
_a = state_dict.pop(UpperCamelCase )
_a = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_a = state_dict.pop(UpperCamelCase )
_a = val
# finally, create HuggingFace model and load state dict
_a = DetaForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
_a = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(UpperCamelCase )
# load image processor
_a = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
_a = prepare_img()
_a = processor(images=UpperCamelCase , return_tensors='''pt''' )
_a = encoding['''pixel_values''']
_a = model(pixel_values.to(UpperCamelCase ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_a = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_a = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_a = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_a = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase ) , atol=1e-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
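# Example invocation (hypothetical paths; the flags are the ones defined above):
#
#     python convert_deta_checkpoint.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub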
| 22 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_a = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_a = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits
_a = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean()
_a = -(labels.shape[-1] * loss.item())
_a = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 22 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = tempfile.mkdtemp()
_a = BlipImageProcessor()
_a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
_a = BlipaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def __lowerCAmelCase ( self : Tuple , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_a = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = self.prepare_image_inputs()
_a = image_processor(lowerCAmelCase_ , return_tensors='''np''' )
_a = processor(images=lowerCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = processor(text=lowerCAmelCase_ )
_a = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(lowerCAmelCase_ )
_a = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 22 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ):
    '''simple docstring'''
    # Encode an image tensor in [0, 1] as {-1, 1}-valued bit planes: quantize
    # each channel to uint8, expand into `bits` binary planes, then rescale
    # from {0, 1} to {-1, 1} so the diffusion model sees zero-centered data.
    _a = x.device
    _a = (x * 255).int().clamp(0 , 255 )
    _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase )
    _a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
    _a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' )
    _a = ((x & mask) != 0).float()
    _a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' )
    _a = bits * 2 - 1
    return bits
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ):
    '''simple docstring'''
    # Inverse of the encoding above: threshold the bit planes at zero, weight
    # them by powers of two, sum back to uint8 values and rescale to [0, 1].
    _a = x.device
    _a = (x > 0).int()
    _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa )
    _a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
    _a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 )
    _a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_a = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_a = self.alphas_cumprod[timestep]
_a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_a = self._get_variance(UpperCamelCase , UpperCamelCase )
_a = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu'''
_a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
_a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
_a = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_a = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
_a = None
# 1. compute alphas, betas
_a = self.alphas_cumprod[t]
_a = self.alphas_cumprod[t - 1] if t > 0 else self.one
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_a = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_a = 0
if t > 0:
_a = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
_a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int:
"""simple docstring"""
super().__init__()
_a = bit_scale
_a = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_a = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_a = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_a = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
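# Round-trip sanity check sketch (hedged: assumes the helpers keep the names
# used at the call sites above, `decimal_to_bits` / `bits_to_decimal`):
#
#     x = torch.rand(1, 3, 8, 8)
#     restored = bits_to_decimal(decimal_to_bits(x))
#     assert torch.allclose(x, restored, atol=1 / 255)  # 8-bit quantization error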
| 22 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def snake_case_ ():
'''simple docstring'''
_a = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
_a = Dataset.from_dict(UpperCamelCase )
return dataset
class A ( _a ):
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = get_dataset()
_a = make_duplicate_clusters(lowerCAmelCase_ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_a = get_dataset()
_a , _a = deduplicate_dataset(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 2 )
print(lowerCAmelCase_ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , lowerCAmelCase_ )
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A ( _a ):
lowercase_ = 'roformer'
def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size if embedding_size is None else embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = rotary_value
_a = use_cache
class A ( _a ):
@property
def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
    '''simple docstring'''
    # Leftmost insertion point: first index whose value is >= item.
    if hi < 0:
        _a = len(UpperCamelCase )
    while lo < hi:
        _a = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            _a = mid + 1
        else:
            _a = mid
    return lo
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
    '''simple docstring'''
    # Rightmost insertion point: first index whose value is > item.
    if hi < 0:
        _a = len(UpperCamelCase )
    while lo < hi:
        _a = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            _a = mid + 1
        else:
            _a = mid
    return lo
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , UpperCamelCase )
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , UpperCamelCase )
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int ):
'''simple docstring'''
_a = 0
_a = len(UpperCamelCase ) - 1
while left <= right:
_a = left + (right - left) // 2
_a = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_a = midpoint - 1
else:
_a = midpoint + 1
return None
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int ):
'''simple docstring'''
_a = bisect.bisect_left(UpperCamelCase , UpperCamelCase )
if index != len(UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if right < left:
return None
_a = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(UpperCamelCase , UpperCamelCase , midpoint + 1 , UpperCamelCase )
if __name__ == "__main__":
_snake_case : Union[str, Any] = input('Enter numbers separated by comma:\n').strip()
_snake_case : Union[str, Any] = sorted(int(item) for item in user_input.split(','))
_snake_case : Union[str, Any] = int(input('Enter a single number to be found in the list:\n'))
_snake_case : Union[str, Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
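# Doctest-style examples (hedged: assuming the original names used in the
# __main__ block, e.g. `binary_search`):
#
#     >>> binary_search([0, 5, 7, 10, 15], 5)
#     1
#     >>> binary_search([0, 5, 7, 10, 15], 6) is None
#     True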
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
_a = [[] for _ in range(lowerCAmelCase_ )]
_a = size
def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self._size
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )
    def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None:
        """simple docstring"""
        # 0-1 BFS: a deque replaces Dijkstra's priority queue. Zero-weight edges
        # are pushed to the front and unit-weight edges to the back, so vertices
        # are always popped in non-decreasing distance order.
        _a = deque([start_vertex] )
        _a = [None] * self.size
        _a = 0
        while queue:
            _a = queue.popleft()
            _a = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                _a = current_distance + edge.weight
                _a = distances[edge.destination_vertex]
                # Skip if a distance at least as good has already been recorded
                if (
                    isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                _a = new_distance
                if edge.weight == 0:
                    # Zero-weight edge: same distance, explore first
                    queue.appendleft(edge.destination_vertex )
                else:
                    # Unit-weight edge: greater distance, explore later
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
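# Usage sketch (hypothetical: assumes the original method names `add_edge` and
# `get_shortest_path` behind the renamed definitions above):
#
#     g = A(3)             # three vertices: 0, 1, 2
#     g.add_edge(0, 1, 0)  # zero-weight edge 0 -> 1
#     g.add_edge(1, 2, 1)  # unit-weight edge 1 -> 2
#     assert g.get_shortest_path(0, 2) == 1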
| 22 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
_a = [True] * (num + 1)
_a = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
_a = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Optional[Any] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
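# Expected behaviour (hedged: assuming the original name used in the
# __main__ block):
#
#     >>> prime_sieve_eratosthenes(30)
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]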
| 22 |
'''simple docstring'''
from math import pi, sqrt
def snake_case_ (UpperCamelCase : float ):
'''simple docstring'''
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(UpperCamelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(UpperCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def snake_case_ ():
'''simple docstring'''
assert gamma(0.5 ) == sqrt(UpperCamelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case : Optional[Any] = 1.0
while num:
_snake_case : Dict = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
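# Spot checks (hedged: assuming the original name `gamma` used in the
# __main__ block):
#
#     >>> gamma(5)  # Gamma(n) == (n - 1)! for positive integers
#     24.0
#     >>> round(gamma(3.5), 4)  # half-integers reduce to multiples of sqrt(pi)
#     3.3234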
| 22 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_snake_case : Optional[int] = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
lowercase_ = None
class A ( _a ):
lowercase_ = 'train'
lowercase_ = 'dev'
lowercase_ = 'test'
class A :
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : List[InputExample] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Dict="[CLS]" , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : Dict=-1_00 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : str=True , ) -> List[InputFeatures]:
"""simple docstring"""
_a = {label: i for i, label in enumerate(lowerCAmelCase_ )}
_a = []
for ex_index, example in enumerate(lowerCAmelCase_ ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , lowerCAmelCase_ , len(lowerCAmelCase_ ) )
_a = []
_a = []
for word, label in zip(example.words , example.labels ):
_a = tokenizer.tokenize(lowerCAmelCase_ )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(lowerCAmelCase_ ) > 0:
tokens.extend(lowerCAmelCase_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowerCAmelCase_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a = tokenizer.num_special_tokens_to_add()
if len(lowerCAmelCase_ ) > max_seq_length - special_tokens_count:
_a = tokens[: (max_seq_length - special_tokens_count)]
_a = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a = [sequence_a_segment_id] * len(lowerCAmelCase_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a = [cls_token] + tokens
_a = [pad_token_label_id] + label_ids
_a = [cls_token_segment_id] + segment_ids
_a = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a = [1 if mask_padding_with_zero else 0] * len(lowerCAmelCase_ )
# Zero-pad up to the sequence length.
_a = max_seq_length - len(lowerCAmelCase_ )
if pad_on_left:
_a = ([pad_token] * padding_length) + input_ids
_a = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a = ([pad_token_segment_id] * padding_length) + segment_ids
_a = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(lowerCAmelCase_ ) == max_seq_length
assert len(lowerCAmelCase_ ) == max_seq_length
assert len(lowerCAmelCase_ ) == max_seq_length
assert len(lowerCAmelCase_ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(lowerCAmelCase_ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(lowerCAmelCase_ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(lowerCAmelCase_ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(lowerCAmelCase_ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(lowerCAmelCase_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a = None
features.append(
InputFeatures(
input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , label_ids=lowerCAmelCase_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A ( _a ):
lowercase_ = 42
lowercase_ = nn.CrossEntropyLoss().ignore_index
def __init__( self : Dict , lowerCAmelCase_ : TokenClassificationTask , lowerCAmelCase_ : str , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Split = Split.train , ) -> List[str]:
"""simple docstring"""
_a = os.path.join(
lowerCAmelCase_ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(lowerCAmelCase_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase_ ):
if os.path.exists(lowerCAmelCase_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
_a = torch.load(lowerCAmelCase_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
_a = token_classification_task.read_examples_from_file(lowerCAmelCase_ , lowerCAmelCase_ )
# TODO clean up all this to leverage built-in features of tokenizers
_a = token_classification_task.convert_examples_to_features(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCAmelCase_ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features , lowerCAmelCase_ )
def __len__( self : Any ) -> int:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Dict , lowerCAmelCase_ : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A :
lowercase_ = 42
lowercase_ = -100
def __init__( self : Optional[Any] , lowerCAmelCase_ : TokenClassificationTask , lowerCAmelCase_ : str , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Split = Split.train , ) -> str:
"""simple docstring"""
_a = token_classification_task.read_examples_from_file(lowerCAmelCase_ , lowerCAmelCase_ )
# TODO clean up all this to leverage built-in features of tokenizers
_a = token_classification_task.convert_examples_to_features(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCAmelCase_ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a = tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a = tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Tuple ) -> Tuple:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCAmelCase_ : Tuple ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
| 22 |
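The feature conversion above scores only the first wordpiece of each word: the real label id goes on the first subtoken and pad_token_label_id (the CrossEntropyLoss ignore index, -100, in the torch branch) on the rest. A toy standalone sketch of that alignment rule, using a fake wordpiece split instead of a real tokenizer:

PAD_TOKEN_LABEL_ID = -100  # ignored by torch.nn.CrossEntropyLoss

def align_labels(word_pieces_per_word, word_label_ids):
    # first piece of each word keeps the word's label; the rest are masked out
    label_ids = []
    for pieces, label_id in zip(word_pieces_per_word, word_label_ids):
        label_ids.extend([label_id] + [PAD_TOKEN_LABEL_ID] * (len(pieces) - 1))
    return label_ids

# "Johanson" -> ["johan", "##son"]: only the first piece is scored
assert align_labels([["johan", "##son"], ["lives"]], [3, 0]) == [3, -100, 0]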
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 | 1 |
'''simple docstring'''
def print_max_activities(start: list[int] , finish: list[int] ) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 22 |
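Tracing the greedy rule above on the sample data (start = [1, 3, 0, 5, 8, 5], finish = [2, 4, 6, 7, 9, 9]): activity 0 is taken; 1 starts at 3 >= finish 2 and is taken; 2 starts before 4 and is skipped; 3 and 4 qualify; 5 starts at 5 < 9 and is skipped. A standalone variant that returns the indices instead of printing them:

def max_activities(start: list[int], finish: list[int]) -> list[int]:
    # greedy selection; assumes activities are pre-sorted by finish time
    selected = [0]
    last = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]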
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
def remove_articles(UpperCamelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : str ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
_a = deltmpscorea / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
| 22 | 1 |
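The exact-match component of the metric above compares answers only after lower-casing, stripping punctuation, dropping English articles, and collapsing whitespace; a standalone sketch of that normalization (the regex mirrors the one in the metric code):

import re
import string

def normalize_answer(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

# article, punctuation, and casing differences do not break an exact match
assert normalize_answer("The cat sat.") == normalize_answer("cat   sat") == "cat sat"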
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
_a = [[] for _ in range(lowerCAmelCase_ )]
_a = size
def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self._size
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None:
"""simple docstring"""
_a = deque([start_vertex] )
_a = [None] * self.size
_a = 0
while queue:
_a = queue.popleft()
_a = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_a = current_distance + edge.weight
_a = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and new_distance >= dest_vertex_distance
):
continue
_a = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(UpperCamelCase )
return images
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
_a = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
| 22 | 1 |
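A standalone walk-through of the conversion path above, from a [-1, 1] float tensor in NCHW layout to uint8 NHWC arrays and finally PIL images (random data, purely illustrative):

import torch
from PIL import Image

images = torch.rand(2, 3, 8, 8) * 2 - 1                    # fake model output in [-1, 1]
images = (images / 2 + 0.5).clamp(0, 1)                    # denormalize to [0, 1]
arrays = images.cpu().permute(0, 2, 3, 1).float().numpy()  # NCHW -> NHWC
arrays = (arrays * 255).round().astype("uint8")
pil_images = [Image.fromarray(a) for a in arrays]
assert pil_images[0].size == (8, 8)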
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_snake_case : List[Any] = 16
_snake_case : Tuple = 32
def snake_case_ (UpperCamelCase : Accelerator , UpperCamelCase : int = 16 , UpperCamelCase : str = "bert-base-cased" ):
'''simple docstring'''
_a = AutoTokenizer.from_pretrained(UpperCamelCase )
_a = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_a = datasets.map(
UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
_a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase )
return train_dataloader, eval_dataloader
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config['''lr''']
_a = int(config['''num_epochs'''] )
_a = int(config['''seed'''] )
_a = int(config['''batch_size'''] )
_a = args.model_name_or_path
set_seed(UpperCamelCase )
_a , _a = get_dataloaders(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase , return_dict=UpperCamelCase )
# Instantiate optimizer
_a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_a = optimizer_cls(params=model.parameters() , lr=UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_a = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_a = 1
_a = (len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_a = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase , num_warmup_steps=0 , num_training_steps=UpperCamelCase , )
else:
_a = DummyScheduler(UpperCamelCase , total_num_steps=UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a = accelerator.prepare(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
_a = 0
# We also need to keep track of the stating epoch so files are named properly
_a = 0
# Now we train the model
_a = evaluate.load('''glue''' , '''mrpc''' )
_a = 0
_a = {}
for epoch in range(UpperCamelCase , UpperCamelCase ):
model.train()
for step, batch in enumerate(UpperCamelCase ):
_a = model(**UpperCamelCase )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_a = 0
for step, batch in enumerate(UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**UpperCamelCase )
_a = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_a , _a = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCamelCase ) - 1:
_a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCamelCase , references=UpperCamelCase , )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , UpperCamelCase )
_a = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
_a = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(UpperCamelCase , UpperCamelCase )
def snake_case_ ():
'''simple docstring'''
_a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=UpperCamelCase , default=UpperCamelCase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=UpperCamelCase , default=3 , help='''Number of train epochs.''' , )
_a = parser.parse_args()
_a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
main()
| 22 |
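One non-obvious step in the evaluation loop above: gathering across processes pads the last batch, so predictions are truncated back to the true dataset length using samples_seen. A numpy illustration of that truncation with made-up sizes:

import numpy as np

dataset_len = 10                        # real number of eval examples
samples_seen = 8                        # examples consumed before the last batch
gathered_last_batch = np.arange(8, 12)  # gathered last batch: 4 entries, 2 are padding duplicates
kept = gathered_last_batch[: dataset_len - samples_seen]
assert kept.tolist() == [8, 9]          # duplicates dropped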
'''simple docstring'''
import requests
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = {'''Content-Type''': '''application/json'''}
_a = requests.post(UpperCamelCase , json={'''text''': message_body} , headers=UpperCamelCase )
if response.status_code != 200:
_a = (
'''Request to slack returned an error '''
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 1 |
'''simple docstring'''
import random
def partition(a: list , left_index: int , right_index: int ) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j] , a[i] = a[i] , a[j]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1] , a[left_index]
    return i - 1
def quick_sort_random(a: list , left: int , right: int ) -> None:
    """In-place quicksort over a[left:right] with a uniformly random pivot."""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    """Read comma-separated integers from stdin, sort them in place, and print the result."""
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    arr = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
| 22 |
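A quick property check of the randomized-quicksort idea above, written out-of-place so it is easy to compare against Python's sorted() (names are illustrative):

import random

def quicksort(a: list) -> list:
    if len(a) <= 1:
        return a
    pivot = random.choice(a)  # random pivot keeps expected time O(n log n)
    return (
        quicksort([x for x in a if x < pivot])
        + [x for x in a if x == pivot]
        + quicksort([x for x in a if x > pivot])
    )

data = [random.randint(-100, 100) for _ in range(200)]
assert quicksort(data) == sorted(data)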
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 22 | 1 |
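The preprocess path above applies resize, center-crop, rescale, and normalize in that order, each gated by its do_* flag; a standalone numpy sketch of the last two steps, which is where value ranges usually go wrong (the constants are the ImageNet mean/std defaults referenced above):

import numpy as np

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                       # rescale to [0, 1]
image = (image - IMAGENET_MEAN) / IMAGENET_STD  # per-channel normalize
image = image.transpose(2, 0, 1)                # HWC -> CHW (ChannelDimension.FIRST)
assert image.shape == (3, 224, 224)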
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 0.0
lowercase_ = 1
lowercase_ = 1
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_a = []
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_a = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_a = resnets
_a = attentions
if self.add_downsample:
_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str=True ) -> str:
"""simple docstring"""
_a = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_a = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_a = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_a = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 0.0
lowercase_ = 1
lowercase_ = True
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_a = resnets
if self.add_downsample:
_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=True ) -> Optional[Any]:
"""simple docstring"""
_a = ()
for resnet in self.resnets:
_a = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_a = self.downsamplers_a(lowerCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 0.0
lowercase_ = 1
lowercase_ = 1
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_a = []
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a = self.prev_output_channel if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_a = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_a = resnets
_a = attentions
if self.add_upsample:
_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=True ) -> int:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_a = res_hidden_states_tuple[-1]
_a = res_hidden_states_tuple[:-1]
_a = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_a = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_a = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_a = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 0.0
lowercase_ = 1
lowercase_ = True
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a = self.prev_output_channel if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_a = resnets
if self.add_upsample:
_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=True ) -> Optional[Any]:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
_a = res_hidden_states_tuple[-1]
_a = res_hidden_states_tuple[:-1]
_a = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_a = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
if self.add_upsample:
_a = self.upsamplers_a(lowerCAmelCase_ )
return hidden_states
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = 0.0
lowercase_ = 1
lowercase_ = 1
lowercase_ = False
lowercase_ = False
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_a = []
for _ in range(self.num_layers ):
_a = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase_ )
_a = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase_ )
_a = resnets
_a = attentions
def __call__( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=True ) -> List[str]:
"""simple docstring"""
_a = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_a = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
_a = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
return hidden_states
| 22 |
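The up blocks above pop the newest residual off res_hidden_states_tuple and concatenate it with the current activations on the channel axis before every resnet; a minimal jax.numpy sketch of that bookkeeping (shapes are made up; Flax convolutions are NHWC, hence axis=-1):

import jax.numpy as jnp

hidden_states = jnp.zeros((1, 16, 16, 320))             # current activations (NHWC)
res_hidden_states_tuple = (jnp.zeros((1, 16, 16, 320)),)

res_hidden_states = res_hidden_states_tuple[-1]         # pop the newest skip connection
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert hidden_states.shape == (1, 16, 16, 640)          # channels doubled for the next resnet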
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        f"16-bits training: {training_args.fp16}")
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
| 22 | 1 |
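The script above feeds Python generators into tf.data.Dataset.from_generator, which is easy to miss under the transformed names. Below is a minimal, self-contained sketch of that pattern; the feature names and toy examples are hypothetical, not taken from the script:

import tensorflow as tf

def gen():
    # Each element is (dict of fixed-name features, integer label).
    yield {"input_ids": [101, 2054, 102], "attention_mask": [1, 1, 1]}, 0
    yield {"input_ids": [101, 2129, 102], "attention_mask": [1, 1, 1]}, 1

input_names = ["input_ids", "attention_mask"]
ds = tf.data.Dataset.from_generator(
    gen,
    ({k: tf.int32 for k in input_names}, tf.int64),  # one dtype per component
    ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),  # one shape per component
)
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))

Newer TensorFlow versions prefer a single output_signature argument built from tf.TensorSpec objects, but the positional dtypes/shapes form shown here matches the style used in the script.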
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 22 |
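The read_in_q_k_v step above is the standard trick for converting a checkpoint that stores attention as one fused qkv matrix into separate query/key/value projections. A small sketch with a made-up hidden size:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused (3*H, H) projection
in_proj_bias = torch.randn(3 * hidden_size)

# Row-wise slices recover query, key and value, in that order.
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)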
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
@require_torch
    def test_prepare_batch_empty_target_text(self):
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
    def test_tokenizer_as_target_length(self):
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
    def test_special_tokens(self):
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask(self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
    def test_pretokenized_inputs(self):
"""simple docstring"""
pass
    def test_embeded_special_tokens(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 22 | 1 |
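The global-attention test above relies on tokenizer.pad() extending a custom global_attention_mask key to the padded batch length. The underlying padding rule can be sketched independently of the tokenizer (the helper name is made up):

def pad_global_attention(masks, pad_value=0):
    # Pad each per-example mask to the longest mask in the batch.
    longest = max(len(m) for m in masks)
    return [m + [pad_value] * (longest - len(m)) for m in masks]

print(pad_global_attention([[0, 0, 0], [0]]))  # [[0, 0, 0], [0, 0, 0]]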
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , UpperCamelCase , UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_a = image_processor.size['''shortest_edge''']
else:
_a = (image_processor.size['''height'''], image_processor.size['''width'''])
_a = Compose(
[
Lambda(lambda UpperCamelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCamelCase : Union[str, Any] ):
_a = [transforms(UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 22 |
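The base_learning_rate handling above implements the linear scaling rule from the MAE recipe: the effective learning rate grows with the global batch size relative to a reference batch of 256. As a standalone sketch (names are illustrative):

def absolute_lr(base_lr, per_device_batch, grad_accum_steps, world_size, reference=256):
    total_batch = per_device_batch * grad_accum_steps * world_size
    return base_lr * total_batch / reference

print(absolute_lr(1e-3, 64, 1, 8))  # 0.002 for a global batch of 512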
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 1 |
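Both tests above are instances of the round-trip property: serialize, deserialize, then compare with the original after normalizing fields the format deliberately drops. A generic sketch of the pattern, with stand-in encode/decode functions rather than SplitDict's private helpers:

import pytest

def encode(d):
    return sorted(d.items())

def decode(items):
    return dict(items)

@pytest.mark.parametrize("original", [{}, {"train": 1}, {"train": 1, "test": 2}])
def test_round_trip(original):
    assert decode(encode(original)) == original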
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size
    )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 22 |
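find_neighborhood above enumerates the 2-exchange neighborhood of a tour: every solution reachable by swapping two interior cities while the endpoints stay fixed. Stripped of the distance bookkeeping, the move generator looks like this (a sketch, not the script's exact code):

from copy import deepcopy

def swap_neighbours(tour):
    # Endpoints stay fixed; every ordered interior pair is exchanged once.
    out = []
    for i in range(1, len(tour) - 1):
        for j in range(1, len(tour) - 1):
            if i == j:
                continue
            cand = deepcopy(tour)
            cand[i], cand[j] = cand[j], cand[i]
            out.append(cand)
    return out

print(len(swap_neighbours(["a", "b", "c", "d", "a"])))  # 6 candidate tours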
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 22 | 1 |
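check_copy_consistency above routes candidate code through black before writing it out, so the copy comparison is insensitive to whitespace and other formatting noise. The normalization step in isolation (a sketch):

import black

messy = "def f(  x ):\n    return x+1\n"
clean = "def f(x):\n    return x + 1\n"
mode = black.Mode(line_length=119)
assert black.format_str(messy, mode=mode) == black.format_str(clean, mode=mode)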
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 |
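The _LazyModule pattern above defers heavy imports until a symbol is first accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__ instead of transformers' helper (the package and module names here are invented):

# lazy_pkg/__init__.py
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    for module, symbols in _import_structure.items():
        if name in symbols:
            mod = importlib.import_module(f".{module}", __name__)
            return getattr(mod, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")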
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 22 | 1 |
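create_token_type_ids_from_sequences above marks the first sequence (with [CLS] and its [SEP]) as segment 0 and the optional second sequence (plus its [SEP]) as segment 1. The arithmetic in isolation, using made-up ids:

def token_type_ids(ids_0, ids_1=None):
    first = [0] * (len(ids_0) + 2)  # [CLS] + ids_0 + [SEP]
    if ids_1 is None:
        return first
    return first + [1] * (len(ids_1) + 1)  # ids_1 + [SEP]

print(token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]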
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 22 |
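A quick sanity check of the function above against math.pi, which is exact to about 15 significant digits; anything much larger has to be compared against published digits instead:

from math import pi as math_pi

digits = pi(15)
assert str(math_pi).startswith(digits[:10])
print(digits)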
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class A(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 22 | 1 |
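The preprocess pipeline above is rescale (uint8 to [0, 1]) followed by per-channel normalization. The two steps compose like this on a channels-last array (a sketch with arbitrary mean/std values):

import numpy as np

def rescale_then_normalize(image, scale=1 / 255, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    image = image.astype(np.float32) * scale
    return (image - np.array(mean)) / np.array(std)

img = np.full((2, 2, 3), 255, dtype=np.uint8)
print(rescale_then_normalize(img))  # every entry becomes 1.0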
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_a = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """simple docstring"""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        """simple docstring"""
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 22 |
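The test above drives the copy-consistency checker, which regenerates a copied class from its reference source and applies any `with DDPM->Test` rename from the marker comment via a plain re.sub. A tiny standalone illustration of that rename step:

import re

reference = "class DDPMSchedulerOutput:\n    prev_sample: torch.FloatTensor\n"
renamed = re.sub("DDPM", "Test", reference)
print(renamed.splitlines()[0])  # class TestSchedulerOutput: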
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22 | 1 |
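The `__init__` above relies on lazy importing: every public name is declared in `_import_structure` up front, and the heavy submodules are only imported the first time one of their names is accessed. A minimal sketch of that idea (this is not the actual `_LazyModule` implementation, just the mechanism it is built on):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Defer importing heavy submodules until one of their names is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value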
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    '''simple docstring'''
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 22 |
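The conversion above splits timm's fused qkv projections into separate query/key/value tensors by slicing along the first axis. A self-contained sketch of that slicing (the head size of 96 is just an example value):

import torch

def split_qkv(qkv_weight, dim):
    # A fused qkv projection stacks query, key and value along the first axis;
    # slicing it into thirds recovers the three separate projections.
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    return q, k, v

fused = torch.randn(3 * 96, 96)
q, k, v = split_qkv(fused, 96)
assert q.shape == k.shape == v.shape == (96, 96)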
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 22 | 1 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 22 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    '''simple docstring'''
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 1 |
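The two error metrics printed by main() are plain sklearn calls; a tiny worked example on toy arrays makes the arithmetic concrete:

import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = np.array([3.0, 2.5, 4.0])
y_pred = np.array([2.8, 2.7, 3.6])
print(mean_absolute_error(y_true, y_pred))  # mean(|0.2|, |0.2|, |0.4|) ≈ 0.2667
print(mean_squared_error(y_true, y_pred))   # mean(0.04, 0.04, 0.16) ≈ 0.08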
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_a: int = 1, input_b: int = 1, carry_in: int = 1):
    '''simple docstring'''
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 22 |
'''simple docstring'''
import qiskit
def half_adder(bita: int, bitb: int):
    '''simple docstring'''
    sim = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, sim, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 22 | 1 |
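With both input bits set, the circuit above is deterministic, so all 1000 shots should land on the bitstring '10' (qiskit prints classical bit 1, the AND/carry, leftmost; bit 0 holds the XOR/sum). A classical reference for the same truth table:

def classical_half_adder(bit_a: int, bit_b: int) -> tuple:
    # Qubit 2 (classical bit 0) carries the XOR/sum, qubit 3 (classical bit 1) the AND/carry.
    return bit_a ^ bit_b, bit_a & bit_b

for a in (0, 1):
    for b in (0, 1):
        print(a, b, "->", classical_half_adder(a, b))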
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''simple docstring'''
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_snake_case : List[str] = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    '''simple docstring'''
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    '''simple docstring'''
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 22 |
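The CamelCase-splitting regex above can be exercised in isolation; it breaks a name at lower-to-upper boundaries (and at acronym boundaries), which is what lets the lookup loop strip one trailing word at a time:

import re

pattern = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")
print([m.group(0) for m in pattern.finditer("CamelCaseSplit")])  # ['Camel', 'Case', 'Split']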
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    '''simple docstring'''
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    '''simple docstring'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
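A quick sanity check for the implementation above is to compare it against hashlib on a well-known test vector. Assuming the module above is importable so that md5_me is in scope (it returns the digest as little-endian hex bytes, matching hashlib's hex output):

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert hashlib.md5(message).hexdigest() == "9e107d9d372bb6826bd81d3542a419d6"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")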
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10001) -> int:
    '''simple docstring'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22 |
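As an independent cross-check (sympy is an extra dependency, not used by the script itself), sympy's nth-prime function agrees with the trial-division search above:

from sympy import prime

assert prime(6) == 13          # first six primes: 2, 3, 5, 7, 11, 13
assert prime(10001) == 104743  # Project Euler problem 7 answer, matches solution(10001)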
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip('''ImageGPT requires clusters at initialization''' )
    def test_init_without_params(self):
"""simple docstring"""
pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image_a = Image.open(dataset[4]["file"])
    image_b = Image.open(dataset[5]["file"])
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
    def test_image(self):
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 22 | 1 |
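The ImageGPT processor quantizes each pixel to the nearest color cluster before tokenization; a toy numpy sketch of that nearest-cluster lookup, using two made-up clusters shaped like the test fixture above:

import numpy as np

clusters = np.array([[0.9, 0.7, 0.4], [-0.6, -0.0, 0.5]])  # 2 hypothetical RGB clusters
pixels = np.array([[0.8, 0.6, 0.3], [-0.5, 0.1, 0.6]])
# Squared euclidean distance from every pixel to every cluster, then argmin per pixel.
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
ids = distances.argmin(axis=1)
print(ids)  # [0 1]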