| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
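

# Worked example (an assumption for illustration, not part of the original file):
# with array = [1, 2, 5] and target = 5 there are 9 ordered combinations
# (1+1+1+1+1, 1+1+1+2, 1+1+2+1, 1+2+1+1, 2+1+1+1, 1+2+2, 2+1+2, 2+2+1, 5),
# so all three functions above return 9.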
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 653 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    # Pass the token in the Authorization header, per the GitHub REST v3 API.
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 653 | 1 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
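

# Usage example (an assumption for illustration, not part of the original file):
# merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]; merge_sort([]) -> []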
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 653 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
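

# Example (an assumption for illustration, not in the original script): for a
# 30 s clip at 16 kHz, random_subsample(wav, max_length=20) returns a random
# contiguous window of 320000 samples (20 s * 16000 Hz).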
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
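

# Typical invocation (a sketch; the dataset and output names below are
# assumptions, not taken from this file):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval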
| 653 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                _ = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                _ = FeaturesManager.determine_framework(self.test_model)
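

# Behavior sketch implied by the tests above (naming assumed, not confirmed by
# this file): with both backends installed, determine_framework(model) prefers
# "pt"; with only one installed it returns that one; with neither it raises.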
| 653 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated `no_*` flags to their positive counterparts before
        # delegating to the parent dataclass constructor.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
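

# Usage sketch (assumed, not from this file; `models` is a field inherited from
# BenchmarkArguments):
#   args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], eager_mode=True)
#   args.strategy  # OneDeviceStrategy on /cpu:0 or /gpu:0 depending on availability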
| 653 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
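

# Usage examples (assumptions for illustration, not part of the original file):
# is_prime(17) -> True; next_prime(14) -> 17; next_prime(14, desc=True) -> 13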
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
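

# Usage sketch (assumed): instantiating the default configuration.
# config = ViTMSNConfig()
# assert config.hidden_size == 768 and config.num_hidden_layers == 12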
| 653 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM needs far fewer inference steps than DDPM.
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverse the DDIM step process: recover the noisy latent from an image.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
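

# Usage sketch (assumptions for illustration: the checkpoint name is not taken
# from this file):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]   # mel spectrogram as a PIL image
#   audio = output.audios[0]   # reconstructed waveform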
| 653 |
'''simple docstring'''
def apply_table(inp, table):
    # Permute the input bit string according to a 1-indexed table.
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    # Rotate the bit string left by one position.
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
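
    # Example run (assumed values, not from the original file): entering the key
    # "1010000010" and message "11010111" prints an 8-bit cipher text, and the
    # decryption branch above recovers the original 8-bit message.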
| 653 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
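

# Run sketch (an assumed command, not part of this file):
#   python -m pytest tests/models/speecht5/test_feature_extraction_speecht5.py -k "integration"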
| 653 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
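

# Run sketch (assumed): executing this script writes the timing results as JSON
# to a "results" directory next to the script, e.g. results/benchmark_map_filter.json.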
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
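

# Usage sketch (assumed): the default configuration gives a Marian-style model
# with d_model=1024 and 12 encoder / 12 decoder layers.
# configuration = MarianConfig()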
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
lowercase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase = seqlen + 2
lowercase , lowercase = self.num_layers
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = common_inputs["""attention_mask"""].dtype
lowercase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
lowercase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
lowercase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
lowercase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
lowercase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-4
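# A minimal usage sketch for the ONNX config above. The public names assumed here
# (MarianConfig, MarianTokenizer, MarianOnnxConfig, generate_dummy_inputs) come from
# the transformers API, not from the obfuscated identifiers in this file.
if __name__ == "__main__":
    from transformers import MarianConfig, MarianTokenizer
    from transformers.models.marian.configuration_marian import MarianOnnxConfig
    from transformers.utils import TensorType

    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
    # batch_size=-1 / seq_length=-1 let the config fall back to its fixed defaults
    dummy_inputs = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=-1, seq_length=-1, framework=TensorType.PYTORCH
    )
    print(sorted(dummy_inputs.keys()))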
| 653 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
        # tf.sub was removed in TF 1.0; tf.subtract is the supported name
        lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
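# For reference, the same expectation/maximization loop in plain NumPy; this is an
# illustrative re-implementation, not part of the TensorFlow graph above.
import numpy as np


def kmeans_numpy(vectors, noofclusters, noofiterations=100):
    data = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    # Pick the initial centroids from among the available vectors
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    assignments = np.zeros(len(data), dtype=int)
    for _ in range(noofiterations):
        # Expectation: assign every vector to its nearest centroid
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors
        for k in range(noofclusters):
            members = data[assignments == k]
            if len(members) > 0:
                centroids[k] = members.mean(axis=0)
    return centroids, assignments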
| 653 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
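# Example environment for the multi-GPU branch above (hypothetical launch command;
# the script name and --n_gpu flag are placeholders). The code expects the launcher
# to export WORLD_SIZE, N_GPU_NODE, RANK, N_NODES and NODE_RANK, e.g. for one
# 8-GPU node:
#
#   WORLD_SIZE=8 N_GPU_NODE=8 N_NODES=1 NODE_RANK=0 \
#   python -m torch.distributed.launch --nproc_per_node=8 train.py --n_gpu 8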
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
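# A self-contained sketch of the space-optimised variant above: only two DP rows
# are kept alive at any time. Names here are illustrative, not from this module.
def largest_square_two_rows(rows: int, cols: int, mat: list[list[int]]) -> int:
    next_row = [0] * (cols + 1)
    largest = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                # 1 + min(right, diagonal, down), exactly as in the 2-D table
                current_row[col] = 1 + min(current_row[col + 1], next_row[col + 1], next_row[col])
                largest = max(largest, current_row[col])
        next_row = current_row
    return largest


assert largest_square_two_rows(2, 2, [[1, 1], [1, 1]]) == 2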
| 653 | 1 |
'''simple docstring'''
import socket
def SCREAMING_SNAKE_CASE ( ):
lowercase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowercase = socket.gethostname()
lowercase = 1_2312
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
lowercase = sock.recv(1024 )
if not data:
break
out_file.write(lowercase_ )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
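# Minimal instantiation sketch; GPTBigCodeConfig is assumed to be the public name
# of the class defined above, as exported by transformers.
if __name__ == "__main__":
    from transformers import GPTBigCodeConfig

    config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12, multi_query=True)
    print(config.hidden_size)  # 768, resolved through the attribute_map aliases above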
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
lowercase_ : Any = 1.6_021e-19 # units = C
def SCREAMING_SNAKE_CASE ( lowercase_ : float , lowercase_ : float , lowercase_ : float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
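    # Worked example (illustrative): with conductivity left as the unknown (passed
    # as 0), sigma = n * e * mu = 2.5e18 * 1.6021e-19 * 0.38 ~= 0.1522 S/m.
    print(SCREAMING_SNAKE_CASE(0, 2.5e18, 0.38))  # ('conductivity', 0.1521995)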
| 653 |
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
lowercase_ : List[Any] = None
lowercase_ : Any = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
lowercase_ : str = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Any=1 , lowercase_ : Optional[int]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
with open(lowercase_ , """r""" ) as f:
return json.load(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : int ):
with open(lowercase_ , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str=True ):
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowercase = os.path.join(lowercase_ , """tmp""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowercase = read_json(os.path.join(lowercase_ , """params.json""" ) )
lowercase = NUM_SHARDS[model_size]
lowercase = params["""n_layers"""]
lowercase = params["""n_heads"""]
lowercase = n_heads // num_shards
lowercase = params["""dim"""]
lowercase = dim // n_heads
lowercase = 10_000.0
lowercase = 1.0 / (base ** (torch.arange(0 , lowercase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowercase = params["""n_kv_heads"""] # for GQA / MQA
lowercase = n_heads_per_shard // num_key_value_heads
lowercase = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowercase = n_heads
lowercase = n_heads_per_shard
lowercase = dim
# permute for sliced rotary
def permute(lowercase_ : Tuple , lowercase_ : List[Any]=n_heads , lowercase_ : int=dim , lowercase_ : List[str]=dim ):
return w.view(lowercase_ , dima // n_heads // 2 , 2 , lowercase_ ).transpose(1 , 2 ).reshape(lowercase_ , lowercase_ )
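    # Shape walk-through (illustrative): the Meta checkpoints store the two rotary
    # components of each head interleaved along the rows (..., 2i, 2i+1, ...), while
    # the HF attention applies rotary embeddings to a first half / second half split
    # per head. Viewing the rows as (n_heads, head_dim // 2, 2, dim) and swapping
    # the middle two axes performs exactly that regrouping before flattening back.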
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowercase = torch.load(os.path.join(lowercase_ , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
lowercase = [
torch.load(os.path.join(lowercase_ , F"""consolidated.{i:02d}.pth""" ) , map_location="""cpu""" )
for i in range(lowercase_ )
]
lowercase = 0
lowercase = {"""weight_map""": {}}
for layer_i in range(lowercase_ ):
lowercase = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowercase = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowercase = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
lowercase = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ ) )
lowercase = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ ) , lowercase_ , lowercase_ , lowercase_ , )
lowercase = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(lowercase_ )] , dim=1 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(lowercase_ )] , dim=0 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(lowercase_ )] , dim=1 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(lowercase_ )] , dim=0 )
lowercase = inv_freq
for k, v in state_dict.items():
lowercase = filename
param_count += v.numel()
torch.save(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
lowercase = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowercase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
lowercase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(lowercase_ )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(lowercase_ )] , dim=0 ),
}
for k, v in state_dict.items():
lowercase = filename
param_count += v.numel()
torch.save(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
# Write configs
lowercase = {"""total_size""": param_count * 2}
write_json(lowercase_ , os.path.join(lowercase_ , """pytorch_model.bin.index.json""" ) )
lowercase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
lowercase = params["""multiple_of"""] if """multiple_of""" in params else 256
lowercase = LlamaConfig(
hidden_size=lowercase_ , intermediate_size=compute_intermediate_size(lowercase_ , lowercase_ , lowercase_ ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=lowercase_ , )
config.save_pretrained(lowercase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
lowercase = LlamaForCausalLM.from_pretrained(lowercase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowercase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(lowercase_ , safe_serialization=lowercase_ )
shutil.rmtree(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : str ):
# Initialize the tokenizer based on the `spm` model
lowercase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
lowercase = tokenizer_class(lowercase_ )
tokenizer.save_pretrained(lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=lowercase_ , help="""Whether or not to save using `safetensors`.""" )
lowercase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowercase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , lowercase_ )
if __name__ == "__main__":
main()
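# Example invocation (paths, the script name and the --safe_serialization value are
# placeholders):
#
#   python convert_llama_weights_to_hf.py --input_dir /path/to/llama \
#       --model_size 7B --output_dir /path/to/out --safe_serialization True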
| 653 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
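# Example invocation (checkpoint and output paths, and the script name, are
# placeholders):
#
#   python convert_mobilevit_original_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small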
| 653 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
lowercase_ : Optional[int] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowercase_ : int = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowercase_ : Any = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase (datasets.Metric ):
def _a ( self ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> int:
'''simple docstring'''
lowercase = mean_squared_error(
_lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase )
return {"mse": mse}
| 653 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
@register_to_config
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowercase = nn.Embedding(_lowerCAmelCase , _lowerCAmelCase )
lowercase = nn.Embedding(_lowerCAmelCase , _lowerCAmelCase )
lowercase = False
lowercase = nn.Dropout(p=_lowerCAmelCase )
lowercase = TaConfig(
vocab_size=_lowerCAmelCase , d_model=_lowerCAmelCase , num_heads=_lowerCAmelCase , d_kv=_lowerCAmelCase , d_ff=_lowerCAmelCase , dropout_rate=_lowerCAmelCase , feed_forward_proj=_lowerCAmelCase , is_decoder=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , )
lowercase = nn.ModuleList()
for lyr_num in range(_lowerCAmelCase ):
lowercase = TaBlock(_lowerCAmelCase )
self.encoders.append(_lowerCAmelCase )
lowercase = TaLayerNorm(_lowerCAmelCase )
lowercase = nn.Dropout(p=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.token_embedder(_lowerCAmelCase )
lowercase = encoder_input_tokens.shape[1]
lowercase = torch.arange(_lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(_lowerCAmelCase )
lowercase = self.dropout_pre(_lowerCAmelCase )
        # invert the attention mask
lowercase = encoder_input_tokens.size()
lowercase = self.get_extended_attention_mask(_lowerCAmelCase , _lowerCAmelCase )
for lyr in self.encoders:
lowercase = lyr(_lowerCAmelCase , _lowerCAmelCase )[0]
lowercase = self.layer_norm(_lowerCAmelCase )
return self.dropout_post(_lowerCAmelCase ), encoder_inputs_mask
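# A minimal usage sketch (keyword names assumed from the public diffusers
# SpectrogramNotesEncoder; the values are illustrative, not a trained config):
#
#   enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#                                 dropout_rate=0.1, num_layers=12, num_heads=12,
#                                 d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu")
#   hidden_states, mask = enc(encoder_input_tokens, encoder_inputs_mask)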
| 653 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ):
lowercase = HfArgumentParser(lowercase_ )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
lowercase = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
if __name__ == "__main__":
main()
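# Hedged invocation sketch (the script name and flag names below are illustrative
# TensorFlowBenchmarkArguments options, not verified against this exact version):
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128
#
# Boolean flags spelled `--no_xxx` are deprecated; the except-branch above turns
# argparse's error into a hint to use the `--no-xxx` spelling instead.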
| 653 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowercase_ : List[str] = False
lowercase_ : Tuple = True
lowercase_ : Any = False
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowercase_ : Tuple = parser.parse_args()
lowercase_ : Optional[Any] = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
lowercase_ : List[str] = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
lowercase_ : Optional[int] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
lowercase_ : str = reader.read()
lowercase_ : Dict = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
lowercase_ : str = UNetaDModel(**config)
else:
lowercase_ : List[str] = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
lowercase_ : List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowercase_ : List[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowercase_ : Optional[Any] = config[key]
del config[key]
lowercase_ : str = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
lowercase_ : List[str] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
lowercase_ : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
lowercase_ : List[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
lowercase_ : int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
lowercase_ : Optional[Any] = param_value
lowercase_ : List[Any] = True
if not has_changed:
lowercase_ : List[str] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
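# Hedged invocation sketch (the script name and both paths are placeholders):
#
#   python convert_legacy_unet.py --repo_path ./legacy-checkpoint --dump_path ./converted
#
# Pick a mode by flipping the three booleans at the top of the file: the config-only
# mode renames the keys listed in `config_parameters_to_change`, while the
# weights-only mode renames top-level parameter groups (e.g. "downsample_blocks.*"
# -> "down_blocks.*") and drops the legacy ".op.bias"/".op.weight" tensors before
# calling `save_pretrained`.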
| 653 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ : List[str] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 1 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Tuple=7 ):
lowercase = None
if token is not None:
lowercase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase = """636036"""
lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase = requests.get(lowercase_ , headers=lowercase_ ).json()
return result["workflow_runs"]
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = get_daily_ci_runs(lowercase_ )
lowercase = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase = workflow_run["""id"""]
break
return workflow_run_id
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
lowercase = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
lowercase = get_artifacts_links(worflow_run_id=lowercase_ , token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ , artifact_url=lowercase_ , output_dir=lowercase_ , token=lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str ):
get_last_daily_ci_artifacts(lowercase_ , lowercase_ , lowercase_ )
lowercase = {}
for artifact_name in artifact_names:
lowercase = os.path.join(lowercase_ , F"""{artifact_name}.zip""" )
if os.path.isfile(lowercase_ ):
lowercase = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
lowercase = f.read().decode("""UTF-8""" )
return results
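# A hedged usage sketch; the artifact name and env var below are placeholders, and
# `worflow_run_id` above mirrors the upstream helper's (misspelled) parameter name.
# The last definition above corresponds to `get_last_daily_ci_reports` in the
# unobfuscated source and is called positionally as (artifact_names, output_dir, token):
if __name__ == "__main__":
    reports = SCREAMING_SNAKE_CASE(
        ["test_reports"], "ci_artifacts", os.environ.get("GITHUB_TOKEN")
    )
    print(sorted(reports))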
| 653 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
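# A hedged round-trip sketch using the equivalent upstream class (requires the
# `sentencepiece` package and Hub access; the checkpoint id comes from the map above):
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer

    tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tok("hello world").input_ids  # character-level pieces plus a trailing </s>
    print(tok.decode(ids, skip_special_tokens=True))  # "hello world" (modulo SP whitespace)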
| 653 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase (_UpperCAmelCase ):
__A = (DPMSolverSinglestepScheduler,)
__A = (('''num_inference_steps''', 25),)
def _a ( self , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**_lowerCAmelCase )
return config
def _a ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self ) -> Dict:
'''simple docstring'''
pass
def _a ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase = 50
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
        # make sure that the first t is uneven (odd)
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _a ( self ) -> Tuple:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase = self.full_loop(scheduler=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
lowercase = DEISMultistepScheduler.from_config(scheduler.config )
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase = self.full_loop(scheduler=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _a ( self ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="""dpmsolver++""" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def _a ( self ) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
lowercase = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _a ( self ) -> Dict:
'''simple docstring'''
self.check_over_configs(variance_type=_lowerCAmelCase )
self.check_over_configs(variance_type="""learned_range""" )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.full_loop(use_karras_sigmas=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.full_loop(prediction_type="""v_prediction""" )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
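# A hedged standalone sketch of the denoising loop that the `full_loop` helper above
# drives; the zero residual is a stand-in for a trained model's noise prediction:
if __name__ == "__main__":
    sched = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
    sched.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        residual = torch.zeros_like(sample)
        sample = sched.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])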
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
lowercase = 1
while len(lowercase_ ) < 1E6:
constant.append(str(lowercase_ ) )
i += 1
lowercase = """""".join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
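# A quick cross-check of the indexing convention used above (a sketch, not part of
# the Project Euler 40 solution proper): the fractional part of Champernowne's
# constant starts "123456789101112...", so index 0 holds "1", index 9 holds the
# first digit of 10, and index 11 holds the first digit of 11.
def _champernowne_prefix(n_chars: int) -> str:
    parts, total, i = [], 0, 1
    while total < n_chars:
        digits = str(i)
        parts.append(digits)
        total += len(digits)
        i += 1
    return "".join(parts)[:n_chars]


assert _champernowne_prefix(12) == "123456789101"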
| 653 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = 1
lowercase = FrozenDict(_lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = True
lowercase = FrozenDict(_lowerCAmelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowerCAmelCase , segmentation_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def _a ( self , _lowerCAmelCase = "auto" ) -> Optional[int]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
self.enable_attention_slicing(_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowercase = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 50 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = 1 , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
lowercase = self.segmentation_model(**_lowerCAmelCase )
lowercase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase = self.numpy_to_pil(_lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , height=_lowerCAmelCase , width=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , output_type=_lowerCAmelCase , return_dict=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=_lowerCAmelCase , )
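# A hedged end-to-end sketch for the pipeline above (the `custom_pipeline` name,
# model ids, and image URL follow the diffusers community-pipeline convention and
# are assumptions, not taken from this file; running it needs a CUDA GPU):
if __name__ == "__main__":
    from io import BytesIO

    import requests
    from PIL import Image
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=seg_model,
        segmentation_processor=seg_processor,
    ).to("cuda")

    url = "https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true"
    image = Image.open(BytesIO(requests.get(url).content)).resize((512, 512))
    result = pipe(prompt="a cup of coffee", image=image, text="a glass").images[0]
    result.save("inpainted.png")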
| 653 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE ( ):
lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" )
with open(lowercase_ ) as file_hand:
return str(sum(int(lowercase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
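# A hedged miniature of the same computation (the strings below are illustrative
# 50-digit stand-ins; the real `num.txt` holds the problem's integers, one per line):
if __name__ == "__main__":
    _toy = [
        "37107287533902102798797998220837590246510135740250",
        "46376937677490009712648124896970078050417018260538",
    ]
    print(str(sum(int(n) for n in _toy))[:10])  # first ten digits of the toy sum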
| 653 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowercase_ : Optional[Any] = trt.Logger(trt.Logger.WARNING)
lowercase_ : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
lowercase_ : str = parser.parse_args()
if args.tokenizer_name:
lowercase_ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
lowercase_ : int = args.per_device_eval_batch_size
lowercase_ : int = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowercase_ : Optional[Any] = True
lowercase_ : List[str] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
lowercase_ : Tuple = '''temp_engine/bert-fp16.engine'''
if args.inta:
lowercase_ : str = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
lowercase_ : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowercase_ : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
lowercase_ : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowercase_ : Any = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowercase_ : str = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowercase_ : List[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : str , lowercase_ : Any ):
lowercase = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
lowercase = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
lowercase = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase_ )
# start time
lowercase = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase_ ) for d_inp in d_inputs] + [int(lowercase_ ), int(lowercase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase = time.time()
lowercase = end_time - start_time
lowercase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
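# Data-flow note for the helper above (a sketch; the buffers are allocated further
# down in this script): the three `d_inputs` device buffers receive the integer id
# arrays, the two pagelocked host arrays -- shaped (eval_batch_size, max_seq_length)
# -- receive the start/end logits, and `infer_time` starts after the host-to-device
# copies are queued and stops once the device-to-host copies have synchronized.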
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowercase_ : List[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Set up logging; we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase_ : Tuple = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowercase_ : Optional[int] = raw_datasets['''validation'''].column_names
lowercase_ : Tuple = '''question''' if '''question''' in column_names else column_names[0]
lowercase_ : int = '''context''' if '''context''' in column_names else column_names[1]
lowercase_ : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowercase_ : Union[str, Any] = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
lowercase_ : str = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
lowercase = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
lowercase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase_ , stride=args.doc_stride , return_overflowing_tokens=lowercase_ , return_offsets_mapping=lowercase_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase = tokenized_examples.sequence_ids(lowercase_ )
lowercase = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
lowercase = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping entries that are not part of the context, so it is easy to
        # determine whether a token position belongs to the context or not.
lowercase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
lowercase_ : List[Any] = raw_datasets['''validation''']
# Validation Feature Creation
lowercase_ : Tuple = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
lowercase_ : List[str] = default_data_collator
lowercase_ : Any = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowercase_ : Optional[int] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
lowercase = postprocess_qa_predictions(
examples=lowercase_ , features=lowercase_ , predictions=lowercase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
lowercase = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
lowercase = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase_ , label_ids=lowercase_ )
lowercase_ : str = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
return trt.volume(engine.get_binding_shape(lowercase_ ) ) * engine.get_binding_dtype(lowercase_ ).itemsize
# Allocate device memory for inputs and outputs.
lowercase_ : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowercase_ : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowercase_ : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowercase_ : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
lowercase_ : List[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowercase_ : List[Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
lowercase_ : List[Any] = 0.0
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = timeit.default_timer()
lowercase_ : int = None
for step, batch in enumerate(eval_dataloader):
lowercase_ , lowercase_ : Tuple = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowercase_ , lowercase_ : Union[str, Any] = outputs
lowercase_ : Dict = torch.tensor(start_logits)
lowercase_ : str = torch.tensor(end_logits)
        # pad predictions and labels so that they can be gathered across processes
lowercase_ : Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
lowercase_ : Optional[int] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
lowercase_ : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowercase_ : Any = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
lowercase_ : Dict = nested_truncate(all_preds, len(eval_dataset))
lowercase_ : List[str] = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
lowercase_ : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds)
lowercase_ : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
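# Hedged invocation sketch (the script name and model paths are placeholders):
#
#   python run_trt_squad.py --onnx_model_path model.onnx --output_dir ./out \
#       --tokenizer_name bert-base-uncased --dataset_name squad --fp16
#
# The script parses the ONNX graph, builds a serialized engine under temp_engine/
# (fp32, fp16, or int8 depending on the flags), then streams SQuAD batches through
# `model_infer` and reports both the accuracy metrics and the average latency.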
| 653 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
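# A hedged usage sketch mirroring the slow tests above (needs a CUDA GPU, Hub
# access, and the "stabilityai/stable-diffusion-2-base" checkpoint):
if __name__ == "__main__":
    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        model_ckpt, scheduler=scheduler, safety_checker=None
    ).to("cuda")
    image = pipe("a photo of the dolomites", num_inference_steps=50).images[0]
    image.save("dolomites_panorama.png")  # defaults yield a wide 512x2048 canvas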
| 653 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase (_UpperCAmelCase ):
__A = (UniPCMultistepScheduler,)
__A = (('''num_inference_steps''', 25),)
def _a ( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if scheduler is None:
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , _lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowerCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(_lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(_lowerCAmelCase , """set_timesteps""" ):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase = dummy_past_residuals[: scheduler.config.solver_order]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = UniPCMultistepScheduler(**self.get_scheduler_config() )
lowercase = self.full_loop(scheduler=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase = DEISMultistepScheduler.from_config(scheduler.config )
lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase = self.full_loop(scheduler=_lowerCAmelCase )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> Optional[int]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def _a ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , )
lowercase = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _a ( self ) -> List[str]:
'''simple docstring'''
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def _a ( self ) -> str:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.full_loop(prediction_type="""v_prediction""" )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
def _a ( self , **_lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**_lowerCAmelCase )
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
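# A minimal sketch of the denoising loop that `full_loop` above exercises, assuming
# the `diffusers` package is installed; random tensors stand in for a real model.
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample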
| 653 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
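# A minimal sketch of the HfArgumentParser pattern used in main() above; the
# `DemoArguments` dataclass and its defaults are hypothetical.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    model_name_or_path: str = field(default="t5-small")
    data_dir: str = field(default="./data")

parser = HfArgumentParser(DemoArguments)
(demo_args,) = parser.parse_args_into_dataclasses(args=["--model_name_or_path", "t5-base"])
print(demo_args.model_name_or_path)  # -> t5-base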
| 653 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = RobertaTokenizer
__A = RobertaTokenizerFast
__A = True
__A = {'''cls_token''': '''<s>'''}
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = """lower newer"""
lowercase = """lower newer"""
return input_text, output_text
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase = """lower newer"""
lowercase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase = tokenizer.tokenize(_lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_lowerCAmelCase ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_lowerCAmelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.tokenizer_class.from_pretrained("""roberta-base""" )
lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
lowercase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_tokenizer()
lowercase = """Encode this sequence."""
lowercase = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing spaces after special tokens
lowercase = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )} ) # mask token has a left space
lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
lowercase = """Encode <mask> sequence"""
lowercase = """Encode <mask>sequence"""
lowercase = tokenizer.encode(_lowerCAmelCase )
lowercase = encoded.index(_lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase = tokenizer.encode(_lowerCAmelCase )
lowercase = encoded.index(_lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
lowercase = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
lowercase = """A, <mask> AllenNLP sentence."""
lowercase = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
lowercase = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
_lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _a ( self ) -> int:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _lowerCAmelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] , _lowerCAmelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] , _lowerCAmelCase )
def _a ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase = F"""{text_of_1_token} {text_of_1_token}"""
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCAmelCase ) + 1, 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
lowercase = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCAmelCase ), 1 + len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
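# A minimal sketch of the offset-mapping behaviour the test above checks, assuming
# the `roberta-base` checkpoint is reachable; with the default trim_offsets=True the
# second token's character span starts after the leading space.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # expected [(0, 5), (6, 11)]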
| 653 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
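# A minimal usage sketch for the pipeline above; the checkpoint and image URL are
# examples only, and `prompt` is honoured only by models that support conditioning.
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# -> [{'generated_text': '...'}]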
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : list[str] ):
lowercase = """"""
for word_or_phrase in separated:
if not isinstance(lowercase_ , lowercase_ ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(lowercase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
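# For typical inputs the joiner above is equivalent to str.join; a quick check
# with a hypothetical helper name, since the module defines it obfuscated:
def join_with(separator: str, parts: list) -> str:
    return separator.join(parts)

assert join_with("-", ["a", "b", "c"]) == "a-b-c"
assert join_with(" ", ["You", "are", "amazing!"]) == "You are amazing!"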
| 653 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
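# A minimal sketch of instantiating this configuration via the released package,
# where the class above is exported as NezhaConfig; printed values are the defaults.
from transformers import NezhaConfig

config = NezhaConfig()
print(config.hidden_size)            # -> 768
print(config.max_relative_position)  # -> 64, Nezha's relative-position bound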
| 653 | 1 |
'''simple docstring'''
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( lowercase_ : Callable[[float], float] , lowercase_ : float , lowercase_ : float ):
lowercase = a
lowercase = b
if function(lowercase_ ) == 0: # a or b is itself a root of the function
return a
elif function(lowercase_ ) == 0:
return b
elif (
function(lowercase_ ) * function(lowercase_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
lowercase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(lowercase_ ) == 0:
return mid
elif function(lowercase_ ) * function(lowercase_ ) < 0:
lowercase = mid
else:
lowercase = mid
lowercase = start + (end - start) / 2.0
return mid
def SCREAMING_SNAKE_CASE ( lowercase_ : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
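# A self-contained check of the bisection idea above on f(x) = x**3 - 2*x - 5,
# whose real root is approximately 2.0945515.
def f_demo(x: float) -> float:
    return x**3 - 2 * x - 5

lo, hi = 2.0, 3.0  # f_demo(2) = -1 and f_demo(3) = 16, so a root lies in between
while hi - lo > 10**-7:
    mid = (lo + hi) / 2.0
    if f_demo(lo) * f_demo(mid) < 0:
        hi = mid
    else:
        lo = mid
print(lo)  # ~2.0945515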
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
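# A tiny worked example of the rank bookkeeping computed above, assuming 2 nodes
# with 4 GPUs each (world_size = 8); no GPUs are needed to run it.
world_size, n_gpu_per_node = 8, 4
for global_rank in range(world_size):
    node_id = global_rank // n_gpu_per_node
    local_rank = global_rank % n_gpu_per_node
    print(f"global_rank={global_rank} -> node_id={node_id}, local_rank={local_rank}")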
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = int(lowercase_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowercase_ )
lowercase , lowercase = divmod(lowercase_ , 2 )
return binary_recursive(lowercase_ ) + str(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = str(lowercase_ ).strip()
if not number:
raise ValueError("""No input value was provided""" )
lowercase = """-""" if number.startswith("""-""" ) else """"""
lowercase = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return F"""{negative}0b{binary_recursive(int(lowercase_ ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
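# Expected behaviour of the converter above, restated as a self-contained check
# using an iterative divmod loop instead of recursion.
def to_binary(n: int) -> str:
    sign = "-" if n < 0 else ""
    n = abs(n)
    bits = ""
    while n > 1:
        n, r = divmod(n, 2)
        bits = str(r) + bits
    return f"{sign}0b{n}{bits}"

assert to_binary(10) == "0b1010"
assert to_binary(-5) == "-0b101"
assert to_binary(0) == "0b0"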
| 653 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
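# A minimal sketch of consuming the JSON returned above: the GitHub /user endpoint
# includes a "login" field for the authenticated account (subject to token scopes).
import os
import requests

token = os.environ.get("USER_TOKEN", "")
if token:
    headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
    print(requests.get("https://api.github.com/user", headers=headers).json().get("login"))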
| 653 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
__A = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__A = Features({'''image''': Image()} )
__A = Features({'''labels''': ClassLabel} )
__A = "image"
__A = "labels"
def _a ( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _lowerCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
lowercase = copy.deepcopy(self )
lowercase = self.label_schema.copy()
lowercase = features[self.label_column]
lowercase = label_schema
return task_template
@property
def _a ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
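# A minimal sketch of the schema this template aligns, assuming the `datasets`
# package; ClassLabel carries the names that back the "labels" column.
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
print(features["labels"].num_classes)  # -> 2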
| 653 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase (_UpperCAmelCase ):
__A = ['''image_processor''', '''tokenizer''']
__A = '''BlipImageProcessor'''
__A = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
lowercase = self.image_processor
def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowercase = self.tokenizer
lowercase = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
# add pixel_values
lowercase = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
if text is not None:
lowercase = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
else:
lowercase = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.tokenizer.model_input_names
lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
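# A hedged usage sketch (checkpoint name assumed; upstream this class is BlipProcessor):
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
# `inputs` is then a BatchEncoding holding "pixel_values" from the image processor
# plus the tokenizer fields such as "input_ids" and "attention_mask".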
| 653 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(_lowerCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
lowercase = kwargs.pop("""device_idx""" , self.device_idx )
lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
lowercase = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
__A = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager mode.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'''
} , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
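# Typical consumption of the returned strategy (a sketch, not part of this class;
# `benchmark_args.strategy` maps to the property renamed `_a` below):
# with benchmark_args.strategy.scope():
#     model = ...  # variables created here follow the chosen device placement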
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
| 653 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ):
lowercase = 10
lowercase = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowercase = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowercase_ ) ),
} , features=lowercase_ , )
return dataset
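# A minimal illustration of consuming the fixture above (in the upstream conftest the
# fixture function is named `dataset`; here the name was rewritten, so treat this as
# a sketch rather than collectible test code):
def _demo_dataset_shape(dataset):
assert dataset.num_rows == 10
assert set(dataset.column_names) == {"tokens", "labels", "answers", "id"}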
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : List[str] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase_ )
return filename
# FILE_CONTENT + files
lowercase_ : Dict = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowercase = FILE_CONTENT
with open(lowercase_ , """w""" ) as f:
f.write(lowercase_ )
return filename
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
import bza
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowercase = bytes(lowercase_ , """utf-8""" )
with bza.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowercase = bytes(lowercase_ , """utf-8""" )
with gzip.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowercase = bytes(lowercase_ , """utf-8""" )
with lza.frame.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowercase_ , """w""" ) as archive:
archive.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[Any] ):
import tarfile
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase_ , """w""" ) as f:
f.add(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
import lzma
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowercase = bytes(lowercase_ , """utf-8""" )
with lzma.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Dict ):
import zipfile
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowercase = bytes(lowercase_ , """utf-8""" )
with zstd.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
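# Each compression fixture above follows the same pattern: open the target path with
# the codec's file object and write FILE_CONTENT as bytes. A hedged round-trip check
# for the zstd case (zstd.open with mode "rb" is assumed to mirror the "wb" call):
# with zstd.open(path, "rb") as f:
#     assert f.read() == bytes(FILE_CONTENT, "utf-8")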
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowercase = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase_ , """w""" ) as f:
f.write(lowercase_ )
return filename
lowercase_ : Dict = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowercase_ : str = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowercase_ : List[Any] = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowercase_ : Dict = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowercase_ : int = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
lowercase = datasets.Dataset.from_dict(lowercase_ )
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Any ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowercase_ ) ) as con:
lowercase = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase_ , """w""" , newline="""""" ) as f:
lowercase = csv.DictWriter(lowercase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase_ , """w""" , newline="""""" ) as f:
lowercase = csv.DictWriter(lowercase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : Optional[Any] ):
import bza
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase_ , """rb""" ) as f:
lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase_ , """wb""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase_ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Tuple ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowercase = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase_ , """wb""" ) as f:
lowercase = pq.ParquetWriter(lowercase_ , schema=lowercase_ )
lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase_ ) )] for k in DATA[0]} , schema=lowercase_ )
writer.write_table(lowercase_ )
writer.close()
return path
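# Reading the fixture back (a sketch using the pyarrow.parquet module already
# imported above as `pq`):
# table = pq.read_table(path)
# assert table.column_names == ["col_1", "col_2", "col_3"]
# assert table.num_rows == len(DATA)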
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowercase = {"""data""": DATA}
with open(lowercase_ , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowercase = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase_ , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase_ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase_ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : Any ):
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase_ , """rb""" ) as orig_file:
with gzip.open(lowercase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] ):
import gzip
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase_ , """rb""" ) as orig_file:
with gzip.open(lowercase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Any , lowercase_ : str , lowercase_ : str ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.join("""nested""" , os.path.basename(lowercase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : List[str] , lowercase_ : Tuple ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase_ , """w""" ) as f:
f.add(lowercase_ , arcname=os.path.basename(lowercase_ ) )
f.add(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase_ , """w""" ) as f:
f.add(lowercase_ , arcname=os.path.join("""nested""" , os.path.basename(lowercase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
lowercase = ["""0""", """1""", """2""", """3"""]
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
f.write(lowercase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Any ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase_ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase_ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[str] ):
lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase_ , """w""" ) as f:
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ) )
f.write(lowercase_ , arcname=os.path.basename(lowercase_ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
lowercase = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
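# A hedged usage sketch (upstream this class is ViTMSNConfig): the defaults above
# mirror ViT-Base (hidden_size=768, 12 layers, 12 heads), and any field can be
# overridden at construction time, e.g.
# config = ViTMSNConfig(image_size=384, patch_size=32)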
| 653 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : Optional[int] = 16
lowercase_ : Tuple = 32
def SCREAMING_SNAKE_CASE ( lowercase_ : Accelerator , lowercase_ : int = 16 ):
lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase_ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase_ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase = 16
elif accelerator.mixed_precision != "no":
lowercase = 8
else:
lowercase = None
return tokenizer.pad(
lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
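# Usage sketch (the helper above is called as `get_dataloaders` later in this script):
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
# Both loaders pad each batch only to its longest member (padding="longest",
# optionally rounded up via pad_to_multiple_of), so batch shapes can vary per step.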
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ : Any = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Dict ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1":
lowercase = 2
# Initialize accelerator
lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase = config["""lr"""]
lowercase = int(config["""num_epochs"""] )
lowercase = int(config["""seed"""] )
lowercase = int(config["""batch_size"""] )
lowercase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowercase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase = batch_size // MAX_GPU_BATCH_SIZE
lowercase = MAX_GPU_BATCH_SIZE
set_seed(lowercase_ )
lowercase , lowercase = get_dataloaders(lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase = model.to(accelerator.device )
# Instantiate optimizer
lowercase = AdamW(params=model.parameters() , lr=lowercase_ )
# Instantiate scheduler
lowercase = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) // gradient_accumulation_steps , )
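# With the defaults passed from the entry point below (lr=2e-5, 3 epochs), this
# schedule ramps the learning rate linearly from 0 to 2e-5 over the first 100
# optimizer steps, then decays it linearly back to 0 by the final training step.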
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase = model(**lowercase_ )
lowercase = outputs.loss
lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowercase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase = model(**lowercase_ )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase , lowercase = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
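# Why the truncation above matters, with assumed numbers: for an eval set of 100
# samples on 8 processes, each process is padded up to ceil(100 / 8) = 13 samples,
# so a plain `gather` over all batches yields 104 predictions in total; dropping
# the final 104 - 100 = 4 duplicates keeps the metric exact.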
lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase = parser.parse_args()
lowercase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase = """"""
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
lowercase = """"""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = int("""0b""" + data[0] + data[-1] , 2 )
lowercase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
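# Worked example (input "0111" assumed, with an S-box table shaped like sa below):
# the row index comes from the outer bits data[0] + data[-1] = "01" -> 1, the column
# from the inner bits data[1:3] = "11" -> 3, so the lookup is s[1][3] and the value
# is returned as a binary string via bin(...)[2:].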
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(lowercase_ , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
lowercase = apply_sbox(lowercase_ , temp[:4] ) # noqa: E741
lowercase = apply_sbox(lowercase_ , temp[4:] )
lowercase = """0""" * (2 - len(lowercase_ )) + l # noqa: E741
lowercase = """0""" * (2 - len(lowercase_ )) + r
lowercase = apply_table(l + r , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
return temp + right
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 653 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowercase_ : List[str] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : str ):
return (abs(source - target ) / target) < 0.01
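# For example, source=100.5 against target=100 gives a relative error of 0.005 and
# passes, while source=101 gives exactly 0.01 and fails, since the comparison is
# strict (< 0.01).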
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = _TestCommandArgs(dataset=lowercase_ , all_configs=lowercase_ , save_infos=lowercase_ )
lowercase = TestCommand(*lowercase_ )
test_command.run()
lowercase = os.path.join(lowercase_ , """README.md""" )
assert os.path.exists(lowercase_ )
lowercase = DatasetInfosDict.from_directory(lowercase_ )
lowercase = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase , lowercase = getattr(dataset_infos["""default"""] , lowercase_ ), getattr(expected_dataset_infos["""default"""] , lowercase_ )
if key == "num_bytes":
assert is_apercent_close(lowercase_ , lowercase_ )
elif key == "splits":
assert list(lowercase_ ) == list(lowercase_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 653 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=14 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> str:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_token_type_ids
lowercase = use_input_mask
lowercase = use_labels
lowercase = use_mc_token_ids
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = self.vocab_size - 1
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
if self.use_mc_token_ids:
lowercase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self ) -> Any:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__A = (CTRLLMHeadModel,) if is_torch_available() else ()
__A = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = True
__A = False
__A = False
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = CTRLModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def _a ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
@require_torch
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(_lowerCAmelCase )
lowercase = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
lowercase = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase )
| 653 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
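# A hedged usage sketch (variable names here are hypothetical; the clustering function
# above was renamed SCREAMING_SNAKE_CASE, and it relies on the TF1 graph/session API,
# so this only runs under a TensorFlow 1.x runtime):
if __name__ == "__main__":
demo_vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]])
demo_centroids, demo_assignments = SCREAMING_SNAKE_CASE(demo_vectors, 2)
print(demo_centroids)
print(demo_assignments)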
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __UpperCamelCase (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=False , ) -> Optional[Any]:
'''simple docstring'''
lowercase = size if size is not None else {"""height""": 20, """width""": 20}
lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_reduce_labels
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
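# Load one ADE20k image and its segmentation map for the semantic-segmentation tests below.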
def SCREAMING_SNAKE_CASE ( ):
lowercase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowercase = Image.open(dataset[0]["""file"""] )
lowercase = Image.open(dataset[1]["""file"""] )
return image, map
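# Load two ADE20k images and their segmentation maps for the batched tests.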
def SCREAMING_SNAKE_CASE ( ):
lowercase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
lowercase = Image.open(ds[0]["""file"""] )
lowercase = Image.open(ds[1]["""file"""] )
lowercase = Image.open(ds[2]["""file"""] )
lowercase = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = BeitImageProcessor if is_vision_available() else None
def _a ( self ) -> int:
'''simple docstring'''
lowercase = BeitImageProcessingTester(self )
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCAmelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
lowercase = []
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
lowercase , lowercase = prepare_semantic_single_inputs()
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
lowercase , lowercase = prepare_semantic_batch_inputs()
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowercase , lowercase = prepare_semantic_single_inputs()
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
lowercase = True
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 653 |
'''simple docstring'''
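# Largest square of 1s in a binary matrix, solved four ways below: plain
# recursion, recursion with memoization, a full DP table, and a two-row
# space-optimized DP.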
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
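# Top-down variant: the same recursion, but sub-problem results are cached
# in dp_array so each cell is solved at most once.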
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
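# Bottom-up variant: fill an (rows + 1) x (cols + 1) table from the
# bottom-right corner; each cell holds the side of the largest square whose
# top-left corner is at that cell.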
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
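# Space-optimized variant: only the current and the next row of the DP table
# are kept, reducing extra memory from O(rows * cols) to O(cols).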
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
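# Build OneFormer metadata (per-id class names and the list of "thing" ids)
# from a class-info JSON file hosted on the Hub.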
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Dict="shi-labs/oneformer_demo" ):
with open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) as f:
lowercase = json.load(lowercase_ )
lowercase = {}
lowercase = []
lowercase = []
for key, info in class_info.items():
lowercase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(lowercase_ ) )
lowercase = thing_ids
lowercase = class_names
return metadata
class __UpperCamelCase (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=10 , _lowerCAmelCase=False , _lowerCAmelCase=255 , _lowerCAmelCase="shi-labs/oneformer_demo" , _lowerCAmelCase="ade20k_panoptic.json" , _lowerCAmelCase=10 , ) -> str:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = class_info_file
lowercase = prepare_metadata(_lowerCAmelCase , _lowerCAmelCase )
lowercase = num_text
lowercase = repo_path
# for the post_process_functions
lowercase = 2
lowercase = 10
lowercase = 10
lowercase = 3
lowercase = 4
lowercase = num_labels
lowercase = do_reduce_labels
lowercase = ignore_index
def _a ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
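    # Compute the (height, width) expected after shortest-edge resizing,
    # for a single image or, with batched=True, the maximum over a batch.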
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> Tuple:
'''simple docstring'''
if not batched:
lowercase = image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image ):
lowercase , lowercase = image.size
else:
lowercase , lowercase = image.shape[1], image.shape[2]
if w < h:
lowercase = int(self.size["""shortest_edge"""] * h / w )
lowercase = self.size["""shortest_edge"""]
elif w > h:
lowercase = self.size["""shortest_edge"""]
lowercase = int(self.size["""shortest_edge"""] * w / h )
else:
lowercase = self.size["""shortest_edge"""]
lowercase = self.size["""shortest_edge"""]
else:
lowercase = []
for image in image_inputs:
lowercase , lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
lowercase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
def _a ( self ) -> List[str]:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__A = image_processing_class
def _a ( self ) -> str:
'''simple docstring'''
lowercase = OneFormerImageProcessorTester(self )
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """ignore_index""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """class_info_file""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """num_text""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """repo_path""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """metadata""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_reduce_labels""" ) )
def _a ( self ) -> str:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
lowercase = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
lowercase = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
lowercase = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase="np" ) -> int:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowercase = self.image_processing_tester.num_labels
lowercase = None
lowercase = None
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
if with_segmentation_maps:
lowercase = num_labels
if is_instance_map:
lowercase = list(range(_lowerCAmelCase ) ) * 2
lowercase = dict(enumerate(_lowerCAmelCase ) )
lowercase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowercase = [Image.fromarray(_lowerCAmelCase ) for annotation in annotations]
lowercase = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , _lowerCAmelCase , return_tensors="""pt""" , instance_id_to_semantic_id=_lowerCAmelCase , pad_and_return_pixel_mask=_lowerCAmelCase , )
return inputs
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
def common(_lowerCAmelCase=False , _lowerCAmelCase=None ):
lowercase = self.comm_get_image_processor_inputs(
with_segmentation_maps=_lowerCAmelCase , is_instance_map=_lowerCAmelCase , segmentation_type=_lowerCAmelCase )
lowercase = inputs["""mask_labels"""]
lowercase = inputs["""class_labels"""]
lowercase = inputs["""pixel_values"""]
lowercase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_lowerCAmelCase )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = np.zeros((20, 50) )
lowercase = 1
lowercase = 1
lowercase = 1
lowercase = binary_mask_to_rle(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
        lowercase = image_processor.post_process_semantic_segmentation(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowercase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        lowercase = image_processor.post_process_semantic_segmentation(_lowerCAmelCase , target_sizes=_lowerCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase = image_processor.post_process_instance_segmentation(_lowerCAmelCase , threshold=0 )
self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase = image_processor.post_process_panoptic_segmentation(_lowerCAmelCase , threshold=0 )
self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
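    # Map the generic config attribute names onto the GPT-2 style names used here.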
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int = 10 , lowercase_ : int = 1000 , lowercase_ : bool = True ):
assert (
isinstance(lowercase_ , lowercase_ )
and isinstance(lowercase_ , lowercase_ )
and isinstance(lowercase_ , lowercase_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
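# Midpoint of two integers; used as the probe value in the search below.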
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int ):
return int((number_a + number_a) / 2 )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int ):
assert (
isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ )
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(lowercase_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
lowercase = lower
lowercase = higher
lowercase = []
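    # Binary search: probe the midpoint of [lower, higher] and shrink the
    # interval according to the answer until the number is found.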
while True:
lowercase = get_avg(lowercase_ , lowercase_ )
last_numbers.append(lowercase_ )
if answer(lowercase_ ) == "low":
lowercase = number
elif answer(lowercase_ ) == "high":
lowercase = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def SCREAMING_SNAKE_CASE ( ):
lowercase = int(input("""Enter lower value : """ ).strip() )
lowercase = int(input("""Enter high value : """ ).strip() )
lowercase = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 653 |
'''simple docstring'''
import requests
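# Post a message to a Slack incoming webhook and raise on a non-200 response.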
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
lowercase = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
lowercase = tempfile.mkdtemp()
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
lowercase = """hf-internal-testing/ngram-beam-search-decoder"""
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_tokenizer()
lowercase = self.get_feature_extractor()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the processor with overridden decoder parameters and check that they are applied
lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = floats_list((3, 1000) )
lowercase = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = """This is a test string"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
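    # Deterministic dummy logits that stand in for acoustic-model output in
    # the decoding tests below.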
def _a ( self , _lowerCAmelCase=(2, 10, 16) , _lowerCAmelCase=77 ) -> List[Any]:
'''simple docstring'''
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowercase = processor.decode(_lowerCAmelCase )
lowercase = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
lowercase = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
lowercase = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
lowercase , lowercase , lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 15
lowercase = -20.0
lowercase = -4.0
lowercase = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
lowercase = [d[0][2] for d in decoded_decoder_out]
lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1E-3 ) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 2.0
lowercase = 5.0
lowercase = -20.0
lowercase = True
lowercase = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
        # test that only decoder-relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = floats_list((3, 1000) )
lowercase = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowercase = self._get_dummy_logits()
lowercase = processor_wavaveca.batch_decode(_lowerCAmelCase )
lowercase = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def _a ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = [d[key] for d in offsets]
return retrieved_list
def _a ( self ) -> str:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()[0]
lowercase = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()
lowercase = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _a ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowercase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
lowercase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) )
lowercase = iter(_lowerCAmelCase )
lowercase = next(_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
lowercase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
lowercase = model(_lowerCAmelCase ).logits.cpu().numpy()
lowercase = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
lowercase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
lowercase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowercase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 653 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
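# Rename a single key from the original checkpoint to the Hugging Face naming scheme.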
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
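# Convert the whole state dict, splitting each fused qkv projection into
# separate query, key and value tensors.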
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
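

# Hedged CLI sketch (script and file names are hypothetical):
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small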
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00000-of-00002.arrow")))
            # the dump checked shard 00000 twice; the second shard is 00001
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 653 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True],
            hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1,
            drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37,
            num_attention_heads=12, num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 653 | 1 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
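

def _demo_bit_operations():
    # Hedged usage sketch (hypothetical helper; the doctest hook below remains
    # the canonical entry point):
    assert set_bit(0b1101, 1) == 0b1111    # 13 -> 15
    assert clear_bit(0b1111, 1) == 0b1101  # 15 -> 13
    assert flip_bit(0b1101, 1) == 0b1111   # 13 -> 15
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 1) == 1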
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
        eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
        mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self, token_ids: List[int], skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
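

# Hedged usage sketch (kept as a comment because it downloads the pretrained
# sentencepiece model over the network):
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello, world!")  # SentencePiece pieces such as ['▁Hello', ',', '▁world', '!']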
| 653 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the helper names in this file are reconstructed; the dump mangled them
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
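

# Hedged usage sketch of the helpers above:
#   device = get_device()                        # "cuda", "mps", or "cpu"
#   print(f"[{get_timestamp()}] using {device}")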
| 653 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
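

# Hedged usage sketch (kept as a comment because it downloads checkpoint files):
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   tokenizer("Hello world")["input_ids"]  # character-level ids followed by eos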
| 653 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
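
# Hedged CLI sketch (script and checkpoint names are hypothetical):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5 \
#       --half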
| 653 |
'''simple docstring'''
def solution():
    """Returns the product of digits d1 * d10 * d100 * d1000 * d10000 * d100000
    * d1000000 of the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
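

# Hedged alternative sketch: index into the Champernowne constant without
# materialising the whole string (helper name is hypothetical).
def champernowne_digit(position: int) -> int:
    digits, count, start = 1, 9, 1
    while position > digits * count:  # skip the complete block of `digits`-digit numbers
        position -= digits * count
        digits, count, start = digits + 1, count * 10, start * 10
    number = start + (position - 1) // digits  # the number containing the digit
    return int(str(number)[(position - 1) % digits])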
if __name__ == "__main__":
print(solution())
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return a sorted list of the e-mail addresses found on the given site."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
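

# Hedged sketch of the e-mail regex used above:
#   re.findall("[a-zA-Z0-9]+@" + "github.com", "mail [email protected] now")
#   -> ["[email protected]"]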
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
| 653 |
'''simple docstring'''
import os
def solution():
    """Returns the first ten digits of the sum of the numbers stored in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
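

# Hedged toy example of the same idea:
#   str(sum(int(x) for x in ["123", "456", "789"]))[:10]  -> "1368"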
if __name__ == "__main__":
print(solution())
| 653 | 1 |
'''simple docstring'''
from manim import *
class Stage5(Scene):  # class name reconstructed; the base class must be manim's Scene for this animation to render
    def construct(self):
'''simple docstring'''
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""CPU""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""GPU""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""Model""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
lowercase = []
lowercase = []
for i, rect in enumerate(_lowerCAmelCase ):
lowercase = fill.copy().set_fill(_lowerCAmelCase , opacity=0.8 )
target.move_to(_lowerCAmelCase )
model_arr.append(_lowerCAmelCase )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""Disk""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCAmelCase )
lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ) )
lowercase = Square(0.3 )
input.set_fill(_lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCAmelCase , buff=0.5 )
self.play(Write(_lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(_lowerCAmelCase ) )
self.play(FadeOut(_lowerCAmelCase ) )
lowercase = Arrow(start=_lowerCAmelCase , end=_lowerCAmelCase , color=_lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) )
lowercase = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(_lowerCAmelCase ) , Circumscribe(model_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase = AnimationGroup(
FadeOut(_lowerCAmelCase , run_time=0.5 ) , MoveToTarget(_lowerCAmelCase , run_time=0.5 ) , FadeIn(_lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase = a_c
lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowerCAmelCase ) , FadeOut(_lowerCAmelCase , run_time=0.5 ) , )
lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) , MoveToTarget(_lowerCAmelCase ) )
self.wait()
| 653 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 653 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowercase_ : Optional[Any] = False
class __UpperCamelCase (unittest.TestCase ):
def _a ( self , _lowerCAmelCase=32 ) -> Union[str, Any]:
'''simple docstring'''
set_seed(0 )
lowercase = UNetaDModel(sample_size=_lowerCAmelCase , in_channels=3 , out_channels=3 )
lowercase = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_lowerCAmelCase , )
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_lowerCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_lowerCAmelCase ) for _ in range(4 )]
lowercase = [torch.randn((4, 3, 32, 32) ).to(_lowerCAmelCase ) for _ in range(4 )]
lowercase = [torch.randint(0 , 1000 , (4,) ).long().to(_lowerCAmelCase ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase , lowercase = self.get_model_optimizer(resolution=32 )
model.train().to(_lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase = model(_lowerCAmelCase , timesteps[i] ).sample
lowercase = torch.nn.functional.mse_loss(_lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase , lowercase = self.get_model_optimizer(resolution=32 )
model.train().to(_lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase = model(_lowerCAmelCase , timesteps[i] ).sample
lowercase = torch.nn.functional.mse_loss(_lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
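# Hedged note (not part of the original test): with identical beta schedules, both
# schedulers implement the same forward process
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# so `add_noise` returns identical tensors and the two training runs should yield
# matching losses and gradients. A minimal standalone check under that assumption:
#     ddpm = DDPMScheduler(num_train_timesteps=1000)
#     ddim = DDIMScheduler(num_train_timesteps=1000)
#     x0, eps, t = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8), torch.tensor([10])
#     assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t))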
| 653 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
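# Hedged note (not in the original script): the xla_spawn launcher invokes this
# wrapper once per TPU core, passing the process index as the (unused) argument.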
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
| 653 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
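# Hedged usage sketch (not part of the original file): this class is registered
# upstream as the image-to-text pipeline and is normally built through the
# `pipeline` factory; the model name below is hypothetical.
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="some/tiny-captioning-model")
#     captioner("cat.png")  # -> [{"generated_text": "..."}]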
| 653 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = tempfile.mkdtemp()
lowercase = BlipImageProcessor()
lowercase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
lowercase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
lowercase = InstructBlipProcessor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer
def _a ( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor
def _a ( self , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).qformer_tokenizer
def _a ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
lowercase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
lowercase = qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(_lowerCAmelCase )
lowercase = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = self.get_qformer_tokenizer()
lowercase = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 653 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
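# Hedged usage sketch (not in the original file): like other PretrainedConfig
# subclasses, the config above (NezhaConfig upstream) round-trips through JSON.
#     config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#     config.save_pretrained("./nezha-config")   # writes config.json
#     config = NezhaConfig.from_pretrained("./nezha-config")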
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : List[Any] = logging.get_logger(__name__)
lowercase_ : Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''beit'''
def __init__( self , _lowerCAmelCase=8192 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=True , _lowerCAmelCase=[3, 5, 7, 11] , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=256 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=255 , **_lowerCAmelCase , ) -> Any:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = use_mask_token
lowercase = use_absolute_position_embeddings
lowercase = use_relative_position_bias
lowercase = use_shared_relative_position_bias
lowercase = layer_scale_init_value
lowercase = drop_path_rate
lowercase = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase = out_indices
lowercase = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase = use_auxiliary_head
lowercase = auxiliary_loss_weight
lowercase = auxiliary_channels
lowercase = auxiliary_num_convs
lowercase = auxiliary_concat_input
lowercase = semantic_loss_ignore_index
class __UpperCamelCase (_UpperCAmelCase ):
__A = version.parse('''1.11''' )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-4
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
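# Hedged usage sketch (not part of the original file): called with a dump path, the
# helper above (named git_log upstream) records the repo state for reproducibility:
#     git_log("serialization_dir/my_run")
#     # -> serialization_dir/my_run/git_log.json with repo_id / repo_sha / repo_branch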
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> int:
'''simple docstring'''
lowercase = tempfile.mkdtemp()
lowercase = BlipImageProcessor()
lowercase = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
lowercase = BlipProcessor(_lowerCAmelCase , _lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_lowerCAmelCase ) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer
def _a ( self , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor
def _a ( self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
lowercase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(_lowerCAmelCase )
lowercase = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
lowercase = """lower newer"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 653 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = None
__A = None
__A = None
__A = None
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=512 , _lowerCAmelCase="cls" , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = project_dim
lowercase = pooler_fn
lowercase = learn_encoder
lowercase = use_attention_mask
class __UpperCamelCase (_UpperCAmelCase ):
__A = [R'''pooler''', R'''logit_scale''']
__A = [R'''position_ids''', R'''predictions.decoder.bias''']
__A = '''roberta'''
__A = RobertaSeriesConfig
def __init__( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(_lowerCAmelCase )
lowercase = XLMRobertaModel(_lowerCAmelCase )
lowercase = nn.Linear(config.hidden_size , config.project_dim )
lowercase = getattr(_lowerCAmelCase , """has_pre_transformation""" , _lowerCAmelCase )
if self.has_pre_transformation:
lowercase = nn.Linear(config.hidden_size , config.project_dim )
lowercase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _a ( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> Any:
'''simple docstring'''
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.base_model(
input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , position_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase , inputs_embeds=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_lowerCAmelCase , )
if self.has_pre_transformation:
lowercase = outputs["""hidden_states"""][-2]
lowercase = self.pre_LN(_lowerCAmelCase )
lowercase = self.transformation_pre(_lowerCAmelCase )
return TransformationModelOutput(
projection_state=_lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
lowercase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_lowerCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
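# Hedged note (not part of the original file): this wrapper serves as the text
# encoder in AltDiffusion-style pipelines, which consume `projection_state` (the
# hidden states mapped to `project_dim`) instead of CLIP's pooled output.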
| 653 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
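# Hedged worked example (not in the original script): at a 16 kHz sampling rate and
# max_length=20 s, the helper above (random_subsample in the upstream script) keeps
# int(round(16000 * 20)) = 320000 samples, picking a random offset when the clip is longer:
#     wav = np.zeros(500_000, dtype=np.float32)            # ~31 s of audio
#     crop = random_subsample(wav, max_length=20.0, sample_rate=16_000)
#     assert crop.shape == (320_000,)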
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
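# Hedged note (not in the original script): `datasets.Dataset.set_transform`
# registers these functions as lazy, on-the-fly formatters, so feature extraction
# runs only when a batch is actually accessed:
#     raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
#     batch = raw_datasets["train"][:2]   # featurized on access, with a "labels" key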
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
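# Hedged worked example (not in the original script): `eval_pred.predictions` holds
# raw logits of shape (num_examples, num_labels); argmax over axis 1 recovers class
# ids before accuracy is computed, e.g. logits [[0.1, 0.9], [2.0, -1.0]] -> [1, 0].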
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
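# A self-contained sketch of the random sub-sampling used in train_transforms
# above. The script imports its own `random_subsample`; this stand-in is a
# hypothetical equivalent shown for illustration only (it reuses the
# `randint` import already present in this file).
def _random_subsample_sketch(wav, max_length, sample_rate):
    """Crop a waveform to at most `max_length` seconds at a random offset."""
    target_len = int(round(sample_rate * max_length))
    if len(wav) <= target_len:
        return wav
    start = randint(0, len(wav) - target_len)
    return wav[start : start + target_len]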
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ : Dict = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowercase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(_lowerCAmelCase )
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
lowercase = kwargs.pop("""device_idx""" , self.device_idx )
lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
lowercase = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
__A = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
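# Usage sketch (hypothetical field values; in upstream transformers this class
# is TensorFlowBenchmarkArguments, and list-valued fields such as `models` come
# from the BenchmarkArguments base):
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"],
#                                       batch_sizes=[8], sequence_lengths=[32])
#   print(args.n_gpu, args.is_tpu)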
| 653 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowercase = ksize + 1
lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(lowercase_ ):
for x in range(lowercase_ ):
# distance from center
lowercase = x - ksize // 2
lowercase = y - ksize // 2
# degree to radiant
lowercase = theta / 180 * np.pi
lowercase = np.cos(_theta )
lowercase = np.sin(_theta )
# get kernel x
lowercase = cos_theta * px + sin_theta * py
# get kernel y
lowercase = -sin_theta * px + cos_theta * py
# fill kernel
lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowercase_ : List[Any] = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
lowercase_ : str = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
lowercase_ : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowercase_ : Dict = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowercase_ : Tuple = out / out.max() * 255
lowercase_ : Dict = out.astype(np.uinta)
imshow('''Original''', gray)
    imshow('''Gabor filter with 11x11 mask and 6 directions''', out)
waitKey(0)
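    # Self-contained restatement of the kernel formula above for theta = psi = 0
    # and gamma = 1 (independent of the names in this file): the Gaussian
    # envelope times the cosine carrier peaks at the kernel centre.
    _ks, _sig, _lam = 9, 10.0, 10.0
    _ax = np.arange(_ks) - _ks // 2
    _px, _py = np.meshgrid(_ax, _ax)
    _kern = np.exp(-(_px**2 + _py**2) / (2 * _sig**2)) * np.cos(2 * np.pi * _px / _lam)
    assert _kern.argmax() == _kern.size // 2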
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
| 653 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE ( ):
lowercase = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" )
return image
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
lowercase = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any] ):
lowercase = dct.pop(lowercase_ )
lowercase = val
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : List[str] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowercase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowercase = torch.cat((q_bias, torch.zeros_like(lowercase_ , requires_grad=lowercase_ ), v_bias) )
lowercase = qkv_bias
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : str ):
lowercase = 364 if """coco""" in model_name else 224
lowercase = BlipaVisionConfig(image_size=lowercase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=lowercase_ ).to_dict()
elif "opt-6.7b" in model_name:
lowercase = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=lowercase_ ).to_dict()
elif "t5-xl" in model_name:
lowercase = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
lowercase = BlipaConfig(vision_config=lowercase_ , text_config=lowercase_ )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=False ):
lowercase = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
lowercase = tokenizer("""\n""" , add_special_tokens=lowercase_ ).input_ids[0]
lowercase , lowercase = get_blipa_config(lowercase_ , eos_token_id=lowercase_ )
lowercase = BlipaForConditionalGeneration(lowercase_ ).eval()
lowercase = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
lowercase , lowercase = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase = """cuda""" if torch.cuda.is_available() else """cpu"""
lowercase , lowercase , lowercase = load_model_and_preprocess(
name=lowercase_ , model_type=lowercase_ , is_eval=lowercase_ , device=lowercase_ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase = original_model.state_dict()
lowercase = create_rename_keys(lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowercase_ )
if key.startswith("""Qformer.bert""" ):
lowercase = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
lowercase = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
lowercase = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
lowercase = key.replace("""t5""" , """language""" )
lowercase = val
# read in qv biases
read_in_q_v_bias(lowercase_ , lowercase_ )
lowercase , lowercase = hf_model.load_state_dict(lowercase_ , strict=lowercase_ )
assert len(lowercase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase = load_demo_image()
lowercase = vis_processors["""eval"""](lowercase_ ).unsqueeze(0 ).to(lowercase_ )
lowercase = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(lowercase_ )
# create processor
lowercase = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowercase_ , image_std=lowercase_ )
lowercase = BlipaProcessor(image_processor=lowercase_ , tokenizer=lowercase_ )
lowercase = processor(images=lowercase_ , return_tensors="""pt""" ).pixel_values.to(lowercase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase_ , lowercase_ )
original_model.to(lowercase_ )
hf_model.to(lowercase_ )
with torch.no_grad():
if "opt" in model_name:
lowercase = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
lowercase = hf_model(lowercase_ , lowercase_ ).logits
else:
lowercase = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
lowercase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
lowercase = hf_model(lowercase_ , lowercase_ , labels=lowercase_ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=lowercase_ )
assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=lowercase_ )
else:
# cast to same type
lowercase = logits.dtype
assert torch.allclose(original_logits.to(lowercase_ ) , lowercase_ , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
lowercase = """"""
lowercase = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids.to(lowercase_ )
lowercase = original_model.generate({"""image""": original_pixel_values} )
lowercase = hf_model.generate(
lowercase_ , lowercase_ , do_sample=lowercase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , lowercase_ )
lowercase = input_ids.shape[1]
lowercase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase_ )
lowercase = [text.strip() for text in output_text]
print("""HF generation:""" , lowercase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase_ )
hf_model.save_pretrained(lowercase_ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
lowercase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowercase_ : List[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
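# Example invocation (the script filename below is hypothetical; adjust to
# wherever this converter lives in your checkout):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b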
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase = """"""
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
lowercase = """"""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = int("""0b""" + data[0] + data[-1] , 2 )
lowercase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(lowercase_ , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
lowercase = apply_sbox(lowercase_ , temp[:4] ) # noqa: E741
lowercase = apply_sbox(lowercase_ , temp[4:] )
lowercase = """0""" * (2 - len(lowercase_ )) + l # noqa: E741
lowercase = """0""" * (2 - len(lowercase_ )) + r
lowercase = apply_table(l + r , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
return temp + right
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
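    # Self-contained check of the permutation and XOR primitives defined above,
    # re-stated locally so it does not depend on this file's obfuscated names:
    _perm = lambda inp, table: "".join(inp[i - 1] for i in table)
    _xor = lambda a, b: "".join("0" if x == y else "1" for x, y in zip(a, b))
    assert _perm("abcd", [2, 4, 3, 1]) == "bdca"  # same pattern as the 4-bit table
    assert _xor("1010", "0110") == "1100"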
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int ):
lowercase = []
create_all_state(1 , lowercase_ , lowercase_ , [] , lowercase_ )
return result
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : list[int] , lowercase_ : list[list[int]] , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(lowercase_ , total_number - level + 2 ):
current_list.append(lowercase_ )
create_all_state(i + 1 , lowercase_ , level - 1 , lowercase_ , lowercase_ )
current_list.pop()
def SCREAMING_SNAKE_CASE ( lowercase_ : list[list[int]] ):
for i in total_list:
print(*lowercase_ )
if __name__ == "__main__":
lowercase_ : Any = 4
lowercase_ : Union[str, Any] = 2
lowercase_ : List[str] = generate_all_combinations(n, k)
print_all_state(total_list)
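    # For reference, the expected output for n = 4, k = 2, stated via the
    # standard library:
    from itertools import combinations
    print([list(c) for c in combinations(range(1, 5), 2)])
    # -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]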
| 653 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
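# The @get_duration decorator imported above is assumed to return the wrapped
# call's wall-clock runtime in seconds; a minimal stand-in would look like:
#
#   import time
#   def get_duration(func):
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper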
| 653 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : str = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''efficientformer'''
def __init__( self , _lowerCAmelCase = [3, 2, 6, 4] , _lowerCAmelCase = [48, 96, 224, 448] , _lowerCAmelCase = [True, True, True, True] , _lowerCAmelCase = 448 , _lowerCAmelCase = 32 , _lowerCAmelCase = 4 , _lowerCAmelCase = 7 , _lowerCAmelCase = 5 , _lowerCAmelCase = 8 , _lowerCAmelCase = 4 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 16 , _lowerCAmelCase = 3 , _lowerCAmelCase = 3 , _lowerCAmelCase = 3 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1 , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 1E-5 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = 224 , _lowerCAmelCase = 1E-05 , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = hidden_sizes
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = patch_size
lowercase = num_channels
lowercase = depths
lowercase = mlp_expansion_ratio
lowercase = downsamples
lowercase = dim
lowercase = key_dim
lowercase = attention_ratio
lowercase = resolution
lowercase = pool_size
lowercase = downsample_patch_size
lowercase = downsample_stride
lowercase = downsample_pad
lowercase = drop_path_rate
lowercase = num_metaad_blocks
lowercase = distillation
lowercase = use_layer_scale
lowercase = layer_scale_init_value
lowercase = image_size
lowercase = batch_norm_eps
| 653 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
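# Usage sketch (hypothetical; the code above relies on TF1-style APIs such as
# tf.Session and tf.placeholder, i.e. tf.compat.v1 semantics). Assuming the
# clustering function above were exposed as `tf_k_means_cluster`:
#
#   vectors = array([[1.0, 1.0], [1.1, 0.9], [8.0, 8.0], [8.2, 7.9]])
#   centroids, assignments = tf_k_means_cluster(vectors, 2)  # two clear clusters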
| 653 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=10 , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=0.9 , _lowerCAmelCase=None , ) -> List[Any]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = patch_size
lowercase = tubelet_size
lowercase = num_frames
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = mask_ratio
lowercase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase = (image_size // patch_size) ** 2
lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase = int(mask_ratio * self.seq_length )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> List[str]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = VideoMAEModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = VideoMAEForPreTraining(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.num_masks,) )
lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase = mask.expand(self.batch_size , -1 ).bool()
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# model only returns predictions for masked patches
lowercase = mask.sum().item()
lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__A = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = VideoMAEModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Tuple:
'''simple docstring'''
lowercase = copy.deepcopy(_lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.model_tester.num_masks,) )
lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase = bool_masked_pos.to(_lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = VideoMAEModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase = True
lowercase = False
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase = len(_lowerCAmelCase )
# Check attention is always last and order is fine
lowercase = True
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _a ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase = np.load(lowercase_ )
return list(lowercase_ )
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# add boolean mask, indicating which patches to mask
lowercase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
lowercase = torch.load(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size([1, 1408, 1536] )
lowercase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_lowerCAmelCase )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase = torch.tensor([0.5142] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=_lowerCAmelCase ).to(
_lowerCAmelCase )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
        lowercase = torch.tensor([0.6469] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4 ) )
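# Note on the masking used throughout these tests: VideoMAEForPreTraining
# expects every video in a batch to mask the same number of patches, so the
# tests build one boolean mask with `num_masks` leading True values and expand
# it across the batch dimension rather than sampling a mask per example.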
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
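    # Independent spot check of the bottom-up recurrence on a 3x3 example
    # (self-contained, not relying on the names above):
    _mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    _dp = [[0] * 4 for _ in range(4)]
    _best = 0
    for _row in range(2, -1, -1):
        for _col in range(2, -1, -1):
            if _mat[_row][_col]:
                _dp[_row][_col] = 1 + min(_dp[_row][_col + 1], _dp[_row + 1][_col + 1], _dp[_row + 1][_col])
                _best = max(_best, _dp[_row][_col])
    assert _best == 2  # e.g. the upper-left 2x2 block of ones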
| 653 | 1 |
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = """"""
try:
with open(lowercase_ , """rb""" ) as binary_file:
lowercase = binary_file.read()
for dat in data:
lowercase = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {"""0""": """0""", """1""": """1"""}
lowercase , lowercase = """""", """"""
lowercase = len(lowercase_ )
for i in range(len(lowercase_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase = lexicon[curr_string]
result += last_match_id
lowercase = last_match_id + """0"""
if math.loga(lowercase_ ).is_integer():
lowercase = {}
for curr_key in list(lowercase_ ):
lowercase = lexicon.pop(lowercase_ )
lowercase = new_lex
lowercase = last_match_id + """1"""
index += 1
lowercase = """"""
return result
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = 8
try:
with open(lowercase_ , """wb""" ) as opened_file:
lowercase = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase_ ) , lowercase_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase_ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase = data_bits[counter:]
lowercase = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = read_file_binary(lowercase_ )
lowercase = remove_prefix(lowercase_ )
lowercase = decompress_data(lowercase_ )
write_file_binary(lowercase_ , lowercase_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
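# Note on the scheme above: the lexicon starts as {"0": "0", "1": "1"}; after
# each match the matched code's value gains a "0" suffix and a brand-new entry
# takes the "1" suffix, and whenever the entry count reaches a power of two
# every existing key is re-prefixed with "0" -- the decoder's mirror of the
# compressor widening its codes by one bit.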
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
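# Usage sketch (hypothetical values): the attribute_map above lets the generic
# names alias the GPT-2 style ones in upstream transformers, e.g.
#
#   config = GPTBigCodeConfig(n_embd=768)
#   assert config.hidden_size == 768  # resolved through attribute_map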
| 653 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : List[Any] = logging.get_logger(__name__)
# TODO Update this
lowercase_ : str = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''esm'''
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1026 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , mask_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = emb_layer_norm_before
lowercase = token_dropout
lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowercase = EsmFoldConfig()
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = EsmFoldConfig(**_lowerCAmelCase )
lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowercase = get_default_vocab_list()
else:
lowercase = vocab_list
else:
lowercase = None
lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , _lowerCAmelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = super().to_dict()
if isinstance(self.esmfold_config , _lowerCAmelCase ):
lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class __UpperCamelCase :
__A = None
__A = True
__A = False
__A = False
__A = False
__A = 0
__A = True
__A = False
__A = 128
__A = None
def _a ( self ) -> int:
'''simple docstring'''
if self.trunk is None:
lowercase = TrunkConfig()
elif isinstance(self.trunk , _lowerCAmelCase ):
lowercase = TrunkConfig(**self.trunk )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = asdict(self )
lowercase = self.trunk.to_dict()
return output
@dataclass
class __UpperCamelCase :
__A = 48
__A = 1024
__A = 128
__A = 32
__A = 32
__A = 32
__A = 0
__A = 0
__A = False
__A = 4
__A = 128
__A = None
def _a ( self ) -> int:
'''simple docstring'''
if self.structure_module is None:
lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , _lowerCAmelCase ):
lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase = self.sequence_state_dim // self.sequence_head_width
lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = asdict(self )
lowercase = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return asdict(self )
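# Default 33-token ESM-2 vocabulary: special tokens, the 20 standard amino acids
# plus ambiguous/rare residue codes, gap characters, and mask/null tokens.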
def SCREAMING_SNAKE_CASE ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 653 |
'''simple docstring'''
import requests
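# Posts a plain-text message to a Slack incoming webhook and raises if Slack
# answers with any status other than 200.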
def send_slack_message ( message_body : str , slack_url : str ):
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowercase_ : str = namedtuple('''CoinsDistribResult''', '''moves excess''')
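# Leetcode-style problem: every node of the tree holds some coins and the total
# number of coins equals the number of nodes. One move transfers a single coin
# between adjacent nodes; the function returns the minimum number of moves so
# that every node ends up with exactly one coin.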
def SCREAMING_SNAKE_CASE ( root : TreeNode | None ):
    if root is None:
        return 0
    # Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )
    # Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
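    # Hypothetical example: the root holds all three coins, so one coin must be
    # pushed down each edge -> two moves in total.
    example_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
    print(SCREAMING_SNAKE_CASE(example_root ))  # prints: 2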
| 653 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
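# Converts a MobileViT checkpoint from Apple's original ml-cvnets release into
# the Hugging Face Transformers format.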
def get_mobilevit_config ( mobilevit_name ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key ( name , base_model=False ):
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
    if "expand_1x1" in name:
        name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
    if "conv_3x3" in name:
        name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
    if "reduce_1x1" in name:
        name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name
    return name
def convert_state_dict ( orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model=base_model )] = val
    return orig_state_dict
def prepare_img ( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint ( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
                    [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
                    [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
                    [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
                    [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
                    [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
                    [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
                ] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ) -> Optional[int]:
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
def _a ( self ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_flip_channel_order""" ) )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 653 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
    def __init__( self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
    def _a ( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def _a ( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _a ( self ) -> Dict:
'''simple docstring'''
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a ( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def _a ( self ) -> Dict:
'''simple docstring'''
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1E-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
        model = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> Any:
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def _a ( self ) -> int:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def _a ( self , image_inputs , batched=False ) -> int:
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
def _a ( self ) -> Any:
'''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
@property
def _a ( self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCAmelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
lowercase = self.image_processing_class(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_rescale=_lowerCAmelCase )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase = image_processing_a.pad(_lowerCAmelCase , return_tensors="""pt""" )
lowercase = image_processing_a(_lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowercase = json.loads(f.read() )
lowercase = {"""image_id""": 3_9769, """annotations""": target}
# encode them
lowercase = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowercase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCAmelCase ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCAmelCase ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCAmelCase ) )
# verify class_labels
lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCAmelCase ) )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCAmelCase ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCAmelCase ) )
@slow
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowercase = json.loads(f.read() )
lowercase = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
lowercase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowercase = YolosImageProcessor(format="""coco_panoptic""" )
lowercase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCAmelCase ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCAmelCase ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCAmelCase ) )
# verify class_labels
lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCAmelCase ) )
# verify masks
lowercase = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCAmelCase )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCAmelCase ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCAmelCase ) )
| 653 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
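# Thin command-line wrapper around TensorFlowBenchmark; the except branch
# rewrites deprecated --no_* flags into an actionable error message.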
def main ( ):
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
    main()
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    bitmap = 0
    for ch in lowercase_:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
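    # Hypothetical check: "abcdef" has no repeated characters, "hello" does.
    print(SCREAMING_SNAKE_CASE("""abcdef""" ))  # prints: True
    print(SCREAMING_SNAKE_CASE("""hello""" ))  # prints: False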
| 653 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ : List[str] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def get_mobilevit_config ( mobilevit_name ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key ( name , base_model=False ):
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
    if "expand_1x1" in name:
        name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
    if "conv_3x3" in name:
        name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
    if "reduce_1x1" in name:
        name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name
    return name
def convert_state_dict ( orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model=base_model )] = val
    return orig_state_dict
def prepare_img ( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint ( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
                    [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
                    [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
                    [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
                    [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
                    [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
                    [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
                ] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class SpeechT5Tokenizer (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
        self.__dict__ = _lowerCAmelCase
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
        return self.sp_model.encode(_lowerCAmelCase , out_type=str )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
        token = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        for token in _lowerCAmelCase:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def _a ( self , token_ids_a , token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
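# Hedged usage sketch via the public transformers class this tokenizer backs
# (shown as comments, since this module's package-relative imports prevent
# running it directly; assumes Hub access):
#
#     from transformers import SpeechT5Tokenizer
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tokenizer("hello world").input_ids  # char-level pieces + trailing </s>
#     print(tokenizer.decode(ids, skip_special_tokens=True))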
| 653 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''Salesforce/blip-image-captioning-base'''
__A = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
__A = '''image_captioner'''
__A = AutoModelForVisionaSeq
__A = ['''image''']
__A = ['''text''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> int:
'''simple docstring'''
return self.pre_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
return self.model.generate(**_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.pre_processor.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )[0].strip()
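# Hedged sketch of the same encode -> generate -> decode flow, driving the
# BLIP checkpoint named above directly (comments only, since this module's
# package-relative imports prevent running it directly; "example.jpg" is a
# hypothetical local file):
#
#     from PIL import Image
#     from transformers import BlipForConditionalGeneration, BlipProcessor
#
#     ckpt = "Salesforce/blip-image-captioning-base"
#     processor = BlipProcessor.from_pretrained(ckpt)
#     model = BlipForConditionalGeneration.from_pretrained(ckpt)
#     inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
#     output_ids = model.generate(**inputs)
#     print(processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip())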
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
lowercase = 1
while len(lowercase_ ) < 1E6:
constant.append(str(lowercase_ ) )
i += 1
lowercase = """""".join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
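# The record above keeps only placeholder names; a clean, runnable sketch of
# the same computation (Project Euler 40: digits of Champernowne's constant):
def champernowne_product() -> int:
    parts = []
    total_len = 0
    i = 1
    while total_len < 10**6:
        digits = str(i)
        parts.append(digits)
        total_len += len(digits)
        i += 1
    constant = "".join(parts)
    product = 1
    for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(constant[position - 1])  # d_position is 1-indexed
    return product  # == 210 for Project Euler 40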
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowercase_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
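# The doctest guard above is inert (the helper has no docstring); a direct
# check of the behaviour, each alphabetic index upper-cased exactly once:
if __name__ == "__main__":
    assert SCREAMING_SNAKE_CASE("abc") == ["Abc", "aBc", "abC"]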
| 653 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE ( ):
lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" )
with open(lowercase_ ) as file_hand:
return str(sum(int(lowercase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
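# Hedged sketch of the same computation with explicit names; "num.txt" is
# assumed to hold one large integer per line (Project Euler 13):
def first_ten_digits_of_sum(path: str = "num.txt") -> str:
    with open(path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]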
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = [0] * len(lowercase_ )
for i in range(1 , len(lowercase_ ) ):
# use last results for better performance - dynamic programming
lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
return max(prefix_function(lowercase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
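# Clean sketch of the two helpers above (both defs in the record share one
# placeholder name, so the second shadows the first): prefix_function builds
# the KMP table, the length of the longest proper border of each prefix.
def prefix_function(s: str) -> list[int]:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi


def longest_prefix(s: str) -> int:
    return max(prefix_function(s)) if s else 0


if __name__ == "__main__":
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]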
| 653 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
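# Hedged end-to-end sketch mirroring the slow tests above (comments only,
# since this test module uses package-relative imports; assumes Hub access
# and a CUDA device):
#
#     from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
#
#     model_id = "stabilityai/stable-diffusion-2-base"
#     scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         model_id, scheduler=scheduler, safety_checker=None
#     ).to("cuda")
#     image = pipe("a photo of the dolomites").images[0]  # ~512x2048 panorama
#     image.save("panorama.png")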
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''longformer'''
def __init__( self , _lowerCAmelCase = 512 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0522 , _lowerCAmelCase = 768 , _lowerCAmelCase = 12 , _lowerCAmelCase = 12 , _lowerCAmelCase = 3072 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 512 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = attention_window
lowercase = sep_token_id
lowercase = bos_token_id
lowercase = eos_token_id
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = onnx_export
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase = True
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowercase = super().outputs
if self.task == "default":
lowercase = {0: """batch"""}
return outputs
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-4
@property
def _a ( self ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
lowercase = 1
return inputs
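# Note on the "make every second token global" step above: in the reference
# implementation it is an in-place slice assignment on the zero mask, e.g.
#
#     global_attention_mask = torch.zeros_like(input_ids)
#     global_attention_mask[:, ::2] = 1  # every second token attends globally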
| 653 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
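# Hedged usage note: the parser above accepts either dataclass CLI flags or a
# single JSON file whose keys mirror those flags, e.g.
#
#     python <this script> --model_name_or_path t5-small --data_dir ./data \
#         --output_dir ./out --do_train --do_eval --predict_with_generate
#
#     python <this script> config.json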
| 653 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] ):
lowercase = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowercase = 1024
lowercase = 4096
lowercase = 24
lowercase = 16
lowercase = [5, 11, 17, 23]
lowercase = [256, 512, 1024, 1024]
lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowercase = 768
lowercase = [1, 1, 1, 0.5]
lowercase = [256, 512, 768, 768]
lowercase = 150
lowercase = 16
lowercase = (1, 384, 384)
lowercase = False
lowercase = """project"""
if "ade" in checkpoint_url:
lowercase = True
lowercase = 768
lowercase = [1, 1, 1, 0.5]
lowercase = 150
lowercase = 16
lowercase = """huggingface/label-files"""
lowercase = """ade20k-id2label.json"""
lowercase = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type="""dataset""" ) ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowercase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowercase = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowercase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowercase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowercase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowercase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowercase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowercase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowercase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowercase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowercase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowercase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowercase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowercase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowercase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowercase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowercase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowercase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowercase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowercase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowercase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowercase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowercase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowercase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowercase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowercase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowercase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowercase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowercase = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowercase = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowercase = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowercase = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowercase = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowercase = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowercase = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Tuple ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase = in_proj_weight[: config.hidden_size, :]
lowercase = in_proj_bias[: config.hidden_size]
lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase = in_proj_weight[
-config.hidden_size :, :
]
lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int ):
lowercase , lowercase = get_dpt_config(lowercase_ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowercase_ )
# rename keys
for key in state_dict.copy().keys():
lowercase = state_dict.pop(lowercase_ )
lowercase = val
# read in qkv matrices
read_in_q_k_v(lowercase_ , lowercase_ )
# load HuggingFace model
lowercase = DPTForSemanticSegmentation(lowercase_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# Check outputs on an image
lowercase = 480 if """ade""" in checkpoint_url else 384
lowercase = DPTImageProcessor(size=lowercase_ )
lowercase = prepare_img()
lowercase = image_processor(lowercase_ , return_tensors="""pt""" )
# forward pass
lowercase = model(**lowercase_ ).logits if """ade""" in checkpoint_url else model(**lowercase_ ).predicted_depth
if show_prediction:
lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=lowercase_ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
lowercase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
lowercase_ : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 653 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
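# Hedged usage sketch via the public pipeline factory (comments only, since
# this module uses package-relative imports; the GIT checkpoint is one of the
# captioning models this class supports):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#     print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
#     # -> [{'generated_text': '...'}]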
| 653 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase (_UpperCAmelCase ):
__A = (UnCLIPScheduler,)
def _a ( self , **_lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self ) -> str:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.001_0011 < 1E-5
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = scheduler.timesteps
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
lowercase = scheduler.timesteps
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
lowercase = None
else:
lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def _a ( self ) -> Tuple:
'''simple docstring'''
pass
def _a ( self ) -> Optional[int]:
'''simple docstring'''
pass
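# Hedged denoising-loop sketch for the scheduler under test (comments only,
# since this module uses package-relative imports; the random tensor stands in
# for a real model output under epsilon prediction with fixed_small_log):
#
#     import torch
#     from diffusers import UnCLIPScheduler
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 32, 32)
#     generator = torch.manual_seed(0)
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(sample)  # placeholder for the model
#         sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample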
| 653 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
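# Hedged instantiation sketch via the public classes this config backs
# (comments only, since this module uses package-relative imports):
#
#     from transformers import NezhaConfig, NezhaModel
#
#     config = NezhaConfig(hidden_size=128, num_hidden_layers=2,
#                          num_attention_heads=2, intermediate_size=256)
#     model = NezhaModel(config)
#     print(sum(p.numel() for p in model.parameters()))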
| 653 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''flax''', '''transformers''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''flax''', '''transformers''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''flax''', '''transformers''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''flax''', '''transformers''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""flax""", """transformers"""] )
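# Hedged illustration of the pattern above: each placeholder funnels every
# entry point into requires_backends, which raises an informative ImportError
# when flax or transformers is missing, instead of an obscure AttributeError
# later. Sketch (not executed here, since this module is imported precisely
# when those backends may be absent):
#
#     class _Probe(metaclass=DummyObject):
#         _backends = ["flax", "transformers"]
#
#         def __init__(self):
#             requires_backends(self, ["flax", "transformers"])
#
#     _Probe()  # raises ImportError while flax/transformers are absent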
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
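# Minimal usage sketch (not from the original file): the helpers above expect an
# argparse-style namespace with the attributes they read. A CPU-only namespace takes
# the early-return branch in init_gpu_params; git_log assumes a git checkout.
if __name__ == "__main__":
    from argparse import Namespace
    params = Namespace(n_gpu=0 , local_rank=-1 , seed=42 )
    init_gpu_params(params )
    set_seed(params )
    git_log(""".""" )  # writes ./git_log.json describing the current repo state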
| 653 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> List[Any]:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        tokenizer.model_max_length = 77
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> Dict:
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_attention_slicing_forward_pass( self ) -> str:
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical( self ) -> Optional[Any]:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_alt_diffusion_ddim( self ) -> List[str]:
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = """A photo of an astronaut"""
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_pndm( self ) -> Tuple:
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion( self ) -> List[Any]:
        '''simple docstring'''
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_fast_ddim( self ) -> Optional[Any]:
        '''simple docstring'''
        scheduler = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""numpy""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 653 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str ):
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 1 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple('''from_to''', '''from_ to''')
METRIC_CONVERSION = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def SCREAMING_SNAKE_CASE ( value: float , from_type: str , to_type: str ):
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
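
    # Illustrative conversions (not from the original file; the printed values follow
    # directly from the from_/to factors in METRIC_CONVERSION above):
    print(SCREAMING_SNAKE_CASE(4 , """cubicmeter""" , """litre""" ))   # 4000.0
    print(SCREAMING_SNAKE_CASE(1 , """litre""" , """gallon""" ))      # ~0.264172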
| 653 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 1_6000 ):
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__( self ) -> List[Any]:
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
def solution(length: int = 50 ):
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
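    # Quick sanity check (illustrative, not from the original file): the recurrence
    # above counts block fillings Project-Euler-114 style, where a 7-unit row is the
    # classic worked example with 17 arrangements.
    print(solution(7))  # expected: 17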
| 653 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx: int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode: bool = field(default=False , metadata={'''help''': '''Benchmark models in eager model.'''} )
    use_xla: bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
    def is_tpu( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
    def gpu_list( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
    def n_gpu( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
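# Minimal usage sketch (illustrative, not from the original file): passing a deprecated
# `no_*` flag exercises the translation logic in `__init__` above, which flips it into
# the positive field plus a warning. `models` and `cuda` are fields of the parent
# `BenchmarkArguments` dataclass.
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(models=["""bert-base-uncased"""] , no_cuda=True )
    print(args.cuda )  # False, derived from no_cuda=True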
| 653 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase_ : Union[str, Any] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowercase_ : Any = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase (datasets.Metric ):
    def _info( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute( self , predictions , references ) -> Optional[Any]:
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
return {
"accuracy": accuracy,
}
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="""gelu""" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( bin_string: str ):
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
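
    # Worked examples (follow from the padding/grouping logic above; not in the original file):
    print(SCREAMING_SNAKE_CASE("""11010""" ))  # '32'  (padded to '011010' -> '011'=3, '010'=2)
    print(SCREAMING_SNAKE_CASE("""1111""" ))   # '17'  (padded to '001111' -> '001'=1, '111'=7)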
| 653 |
'''simple docstring'''
def apply_table(inp , table ):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data ):
    return data[1:] + data[0]
def xor(a , b ):
    res = """"""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s , data ):
    row = int("""0b""" + data[0] + data[-1] , 2 )
    col = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = """0""" * (2 - len(l )) + l  # noqa: E741
    r = """0""" * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
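    # Sanity check (follows from the structure above, not from the original file):
    # decryption applies the same Feistel function with key1 and key2 swapped, so the
    # recovered plain text must equal the original 8 bit message.
    assert PT == message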
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : list[int] ):
return len(set(lowercase_ ) ) == len(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
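
    # Quick checks (consistent with the set-based definition above; not in the original file):
    print(SCREAMING_SNAKE_CASE([1, 2, 3] ))  # True
    print(SCREAMING_SNAKE_CASE([1, 2, 2] ))  # False
    print(SCREAMING_SNAKE_CASE([] ))         # True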
| 653 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map(dataset: datasets.Dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda x : None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH , """wb""" ) as f:
            f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray , output_size: Union[int, Iterable[int]] , keep_aspect_ratio: bool , multiple: int ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
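# Worked example (illustrative, not from the original file): for a 480x640 channels-last
# input and a requested 384x384 output with keep_aspect_ratio=True and multiple=32, the
# height scale (0.8) deviates least from 1 and wins, giving (384, 512):
#
#     import numpy as np
#     get_resize_output_image_size(np.zeros((480, 640, 3)), 384, True, 32)  # (384, 512)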
class DPTImageProcessor(BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 384, """width""": 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , keep_aspect_ratio: bool = None , ensure_multiple_of: int = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ) -> Union[str, Any]:
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 653 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("""float64""" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("""int32""" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("""float""" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("""float""" , [dim] )
        v2 = tf.placeholder("""float""" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1 , v2 ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("""float""" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={v1: vect, v2: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
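# Minimal usage sketch (illustrative, not from the original file; assumes a TF1-style
# runtime, e.g. running the above under `tensorflow.compat.v1` with eager execution
# disabled, since it relies on Graph/Session/placeholder):
if __name__ == "__main__":
    import numpy as np
    points = np.random.RandomState(0 ).randn(60 , 2 ).astype("""float64""" )
    centroids , assignments = TFKMeansCluster(points , 3 )
    print("""centroids:""" , centroids )
    print("""assignments:""" , assignments )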
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig ):
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=12_8112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="""relu""" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="""float32""" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="""all""" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> Dict:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 653 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int , cols: int , mat: list[list[int]] ):
    def update_area_of_max_square(row: int , col: int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int , cols: int , mat: list[list[int]] ):
    def update_area_of_max_square_using_dp_array(
        row: int , col: int , dp_array: list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int , cols: int , mat: list[list[int]] ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int , cols: int , mat: list[list[int]] ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
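    # Extra illustrative checks (not in the original file): all four variants implement
    # the same recurrence and agree on any input; a 2x2 all-ones matrix has side 2.
    print(largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]]))                       # 2
    print(largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]]))   # 2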
| 653 | 1 |
'''simple docstring'''
from torch import nn
def SCREAMING_SNAKE_CASE ( act_fn: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig ):
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
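# Hedged usage sketch (assumes the class is exported as GPTBigCodeConfig, as in
# recent transformers releases); the attribute_map above lets the canonical
# names resolve to the GPT-2-style ones.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=1024, multi_query=True)
print(config.num_hidden_layers, config.hidden_size)  # 24 1024, via attribute_map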
| 653 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
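# Quick illustration (hypothetical numbers): a 3 s clip at 16 kHz has 48_000
# samples, so with max_length=1.0 a random 16_000-sample window is returned:
#   wav = np.zeros(48_000, dtype=np.float32)
#   random_subsample(wav, max_length=1.0, sample_rate=16_000).shape  # (16000,)
# Clips at or below the target length are returned unchanged.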
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
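# Self-contained sketch of the compute_metrics step above (toy logits, not from
# a real model): argmax over class scores fed into the `evaluate` accuracy metric.
import numpy as np
import evaluate

toy_metric = evaluate.load("accuracy")
toy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
toy_predictions = np.argmax(toy_logits, axis=1)  # -> [1, 0, 1]
print(toy_metric.compute(predictions=toy_predictions, references=[1, 0, 0]))
# {'accuracy': 0.666...}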
| 653 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
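# In practice the webhook URL should stay out of source control; a small sketch
# under that assumption (SLACK_WEBHOOK_URL is a hypothetical variable name).
import os

webhook_url = os.environ.get("SLACK_WEBHOOK_URL")
if webhook_url:
    send_slack_message("Nightly build finished.", webhook_url)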
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ : Tuple = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase ):
__A = '''nat'''
__A = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=64 , _lowerCAmelCase=[3, 4, 6, 5] , _lowerCAmelCase=[2, 4, 8, 16] , _lowerCAmelCase=7 , _lowerCAmelCase=3.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = patch_size
lowercase = num_channels
lowercase = embed_dim
lowercase = depths
lowercase = len(_lowerCAmelCase )
lowercase = num_heads
lowercase = kernel_size
lowercase = mlp_ratio
lowercase = qkv_bias
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = drop_path_rate
lowercase = hidden_act
lowercase = layer_norm_eps
lowercase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
lowercase = layer_scale_init_value
lowercase = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase ) + 1 )]
lowercase , lowercase = get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
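# Hedged usage sketch (assumes the class is exported as NatConfig): backbone
# feature maps are requested by stage name, and out_indices is aligned
# automatically by the helper imported above.
from transformers import NatConfig

config = NatConfig(depths=[3, 4, 6, 5], out_features=["stage1", "stage3"])
print(config.out_features)  # ['stage1', 'stage3']
print(config.out_indices)   # aligned automatically, e.g. [1, 3]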
| 653 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
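# Once converted (or using the already-published checkpoints), inference takes a
# few lines; a hedged sketch assuming the "apple/mobilevit-small" hub id and a
# transformers version that exports MobileViTImageProcessor.
from PIL import Image
import requests
import torch
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])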
| 653 | 1 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : List[str] = 16
lowercase_ : Union[str, Any] = 32
def SCREAMING_SNAKE_CASE ( lowercase_ : Accelerator , lowercase_ : int = 16 ):
lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase_ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase = 16
elif accelerator.mixed_precision != "no":
lowercase = 8
else:
lowercase = None
return tokenizer.pad(
lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ : Any = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1":
lowercase = 2
# Initialize accelerator
lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase = config["""lr"""]
lowercase = int(config["""num_epochs"""] )
lowercase = int(config["""seed"""] )
lowercase = int(config["""batch_size"""] )
lowercase = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase_ )
def inner_training_loop(lowercase_ : Tuple ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase = model.to(accelerator.device )
# Instantiate optimizer
lowercase = AdamW(params=model.parameters() , lr=lowercase_ )
lowercase , lowercase = get_dataloaders(lowercase_ , lowercase_ )
# Instantiate scheduler
lowercase = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase = model(**lowercase_ )
lowercase = outputs.loss
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase = model(**lowercase_ )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase , lowercase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowercase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def SCREAMING_SNAKE_CASE ( ):
lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase = parser.parse_args()
lowercase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
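# Toy sketch of the decorator in isolation: it calls the wrapped function with
# the injected batch size and halves it whenever a CUDA OOM-style error is
# raised (the failure threshold below is made up for illustration).
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def probe(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 32:  # pretend anything above 32 exhausts GPU memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(probe())  # retries 128 -> 64 -> 32, then succeeds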
| 653 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
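# The final assertion pattern above — pinning a logits slice against reference
# values with an absolute tolerance — shown in isolation (toy tensors):
import torch

expected = torch.tensor([-2.1703, 2.1107, -2.0811])
observed = expected + 5e-5  # small numerical drift is acceptable
assert torch.allclose(observed, expected, atol=1e-4)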
| 653 | 1 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self) -> str:
        return (
            F"""{self.real}+"""
            F"""{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"""
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4
print(differentiate(f, 9, 2))
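    # One more check (illustrative): d/dx of x**3 at x = 2 is 3 * 2**2 = 12.
    print(differentiate(lambda x: x**3, 2, 1))  # 12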
| 653 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
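# The benchmark can also be driven programmatically; a hedged sketch (argument
# names follow the TensorFlowBenchmarkArguments dataclass, values are examples).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
results = TensorFlowBenchmark(args=args).run()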
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Optional[int] = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Dict = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
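# _LazyModule defers the heavy torch imports until an attribute is first used.
# The core idea can be sketched with a PEP 562 module-level __getattr__
# (simplified; not the real _LazyModule implementation):
#
#   import importlib
#
#   _LAZY = {"IBertModel": ".modeling_ibert", "IBertConfig": ".configuration_ibert"}
#
#   def __getattr__(name):
#       if name in _LAZY:
#           module = importlib.import_module(_LAZY[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")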
| 653 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3''' # keep TensorFlow quiet while collecting versions
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = os.path.join(args.tf_model_dir , """parameters.json""" )
lowercase = json.loads(open(lowercase_ ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
lowercase = args.output + """.pt"""
lowercase = OrderedDict()
with tf.device("""/CPU:0""" ):
lowercase = tf.train.load_checkpoint(args.tf_model_dir )
lowercase = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase = reader.get_tensor(lowercase_ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase = 8
lowercase = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/moe""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase = key_name[-9:-7]
for i in range(16 ):
lowercase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/mlp""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p1/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/ln""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/att""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase = state[:, 0, :, :]
lowercase = state[:, 1, :, :]
lowercase = state[:, 2, :, :]
lowercase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/o/kernel""" ):
lowercase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/an""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase = """model.%s.weight""" % nlayer
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
if key_name.startswith("""model/wte""" ):
lowercase = """lm_head.weight"""
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/wob""" ):
lowercase = """final_logits_bias"""
lowercase = vnp.copy() # same in embedded
lowercase = state.reshape((1, -1) )
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense/kernel":
lowercase = """model.last_project.weight"""
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense_1/bias":
lowercase = """model.last_project.bias"""
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
torch.save(lowercase_ , args.output )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
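# Hedged usage sketch (illustrative, not part of the original sample): with
# the two argparse flags defined above, the converter would typically be run
# from the command line roughly like
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir /path/to/tf_checkpoint \
#       --output /path/to/pytorch_model.bin
#
# The script filename is an assumption; only --tf_model_dir and --output are
# taken from the parser definition above.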
| 653 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
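# Hedged usage sketch (illustrative, not part of the original sample): the
# de-obfuscated class here is `SpeechT5Tokenizer`, loaded and used roughly
# like
#
#   from transformers import SpeechT5Tokenizer
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello world").input_ids  # character-level piece ids
#
# The checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above; the call
# pattern is standard `PreTrainedTokenizer` behaviour, stated here as an
# assumption rather than taken from this sample.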
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : float , lowercase_ : float , lowercase_ : float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
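# Hedged usage sketch (illustrative): with a readable name such as
# `ohms_law`, the helper above behaves like
#
#   ohms_law(voltage=10, current=2, resistance=0)  # -> {"resistance": 5.0}
#   ohms_law(voltage=0, current=2, resistance=5)   # -> {"voltage": 10.0}
#
# Exactly one of the three arguments must be 0; passing none or more than
# one zero raises ValueError, as does a negative resistance.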
| 653 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
lowercase = 1
while len(lowercase_ ) < 1E6:
constant.append(str(lowercase_ ) )
i += 1
lowercase = """""".join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
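# Descriptive note (not part of the original sample): this is Project Euler
# problem 40. `constant` holds the digits of Champernowne's decimal
# "123456789101112...", and the answer is the product
# d(1) * d(10) * d(100) * ... * d(1000000), read with 0-based indexing
# (constant[0] is d(1), constant[99_9999] is d(1000000)).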
if __name__ == "__main__":
print(solution())
| 653 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = []
for line in lines:
lowercase = re.sub(R"""#.*""" , """""" , lowercase_ ) # remove comments
if line:
filtered_lines.append(lowercase_ )
lowercase = """\n""".join(lowercase_ )
# Make a hash from all this code
lowercase = full_str.encode("""utf-8""" )
return shaaaa(lowercase_ ).hexdigest()
# get importable module names and hash for caching
lowercase_ : str = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowercase_ : List[Any] = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowercase_ : List[str] = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
lowercase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
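# Hedged usage sketch (illustrative, not part of the original sample): the
# tables above let a loader infer the packaged builder from a data file's
# suffix, roughly like
#
#   import os
#   suffix = os.path.splitext("data/train.tsv")[1]
#   _EXTENSION_TO_MODULE.get(suffix)  # -> ('csv', {'sep': '\t'})
#
# while _MODULE_TO_EXTENSIONS answers the reverse question: which extensions
# a module accepts, including '.zip' archives for the folder-based builders.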
| 653 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE ( ):
lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" )
with open(lowercase_ ) as file_hand:
return str(sum(int(lowercase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ : List[str] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ : List[str] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE ( lowercase_ : list[list[int]] ):
lowercase = []
for i in range(len(lowercase_ ) ):
lowercase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowercase_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowercase_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowercase_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowercase_ )
return next_generation
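# Hedged worked example (illustrative): applying one generation step to the
# BLINKER pattern defined above flips the vertical bar to a horizontal one,
#
#   new_generation(BLINKER)  # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#
# which is the classic period-2 oscillator. The obfuscated function is named
# `SCREAMING_SNAKE_CASE` here; `new_generation` (called in the image loop
# below) is the de-obfuscated name.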
def SCREAMING_SNAKE_CASE ( lowercase_ : list[list[int]] , lowercase_ : int ):
lowercase = []
for _ in range(lowercase_ ):
# Create output image
lowercase = Image.new("""RGB""" , (len(cells[0] ), len(lowercase_ )) )
lowercase = img.load()
# Save cells to image
for x in range(len(lowercase_ ) ):
for y in range(len(cells[0] ) ):
lowercase = 255 - cells[y][x] * 255
lowercase = (colour, colour, colour)
# Save image
images.append(lowercase_ )
lowercase = new_generation(lowercase_ )
return images
if __name__ == "__main__":
lowercase_ : Tuple = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 653 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 653 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase = """"""
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
lowercase = """"""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = int("""0b""" + data[0] + data[-1] , 2 )
lowercase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(lowercase_ , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
lowercase = apply_sbox(lowercase_ , temp[:4] ) # noqa: E741
lowercase = apply_sbox(lowercase_ , temp[4:] )
lowercase = """0""" * (2 - len(lowercase_ )) + l # noqa: E741
lowercase = """0""" * (2 - len(lowercase_ )) + r
lowercase = apply_table(l + r , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
return temp + right
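# Descriptive note (not part of the original sample): the round above is a
# standard simplified-DES Feistel step on an 8-bit block: expand/permute the
# right half with `expansion`, XOR with the round key, substitute each 4-bit
# half through an S-box (two output bits each), permute the result with the
# 4-bit P table, then XOR into the left half while the right half passes
# through unchanged.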
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 653 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
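# Hedged usage note (not part of the original sample): per the parsing branch
# near the top of main, the script accepts either ordinary CLI flags or a
# single path to a .json file holding every argument, e.g.
#
#   python finetune_trainer.py path/to/args.json
#
# The script filename is an assumption; the JSON behaviour follows from
# `parser.parse_json_file` being used when exactly one ".json" argument is
# given.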
| 653 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''torch''', '''torchsde''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
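# Descriptive note (not part of the original sample): this is the standard
# "dummy object" pattern. The class stands in for a scheduler that needs
# `torch` and `torchsde`, and every entry point routes through
# `requires_backends`, which raises an informative error telling the user to
# install the missing backends instead of failing with an opaque ImportError
# at import time.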
| 653 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
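# Hedged usage sketch (illustrative, not part of the original sample): this
# class backs the "image-to-text" pipeline task, so it is normally reached
# through the factory function, roughly
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text")  # default model left unspecified
#   captioner("photo.jpg")                 # -> [{"generated_text": "..."}]
#
# The return shape follows from `postprocess` above; the image path and the
# choice of default checkpoint are assumptions.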
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : list[int] , lowercase_ : int ):
lowercase = []
lowercase = []
lowercase = 0
lowercase = sum(lowercase_ )
create_state_space_tree(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return result
def SCREAMING_SNAKE_CASE ( lowercase_ : list[int] , lowercase_ : int , lowercase_ : int , lowercase_ : list[int] , lowercase_ : list[list[int]] , lowercase_ : int , ):
if sum(lowercase_ ) > max_sum or (remaining_nums_sum + sum(lowercase_ )) < max_sum:
return
if sum(lowercase_ ) == max_sum:
result.append(lowercase_ )
return
for index in range(lowercase_ , len(lowercase_ ) ):
create_state_space_tree(
lowercase_ , lowercase_ , index + 1 , [*path, nums[index]] , lowercase_ , remaining_nums_sum - nums[index] , )
lowercase_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
lowercase_ : Any = 9
lowercase_ : List[str] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
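# Hedged worked example (illustrative): for the inputs above,
# nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the search prints the two
# qualifying subsets in index order:
#
#   [3, 4, 2] [4, 5]
#
# i.e. 3 + 4 + 2 = 9 and 4 + 5 = 9; the running-sum pruning in
# create_state_space_tree cuts every other branch early.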
| 653 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
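# Hedged usage sketch (illustrative, not part of the original sample): the
# de-obfuscated class is `NezhaConfig`, which instantiates like any
# `PretrainedConfig` subclass,
#
#   config = NezhaConfig()          # defaults taken from __init__ above
#   config.max_relative_position    # -> 64
#
# `NezhaConfig` is inferred from the "nezha" model type and the archive map.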
| 653 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int="attention" ):
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[Any]=False ):
if split_mlp_wi:
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
lowercase = (wi_a, wi_a)
else:
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
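# Descriptive note (not part of the original sample): the `split_mlp_wi`
# branch handles T5 v1.1-style checkpoints, whose feed-forward block uses a
# gated activation with two input projections (`wi_0`, `wi_1`) instead of
# the single `wi` kernel of the original T5; the caller detects this from
# the presence of "encoder/layers_0/mlp/wi_0/kernel" in the flattened
# parameter dict.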
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : int ):
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def SCREAMING_SNAKE_CASE ( lowercase_ : dict , *, lowercase_ : int , lowercase_ : bool ):
lowercase = traverse_util.flatten_dict(variables["""target"""] )
lowercase = {"""/""".join(lowercase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase_ )
lowercase = collections.OrderedDict()
# Shared embeddings.
lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """encoder""" , """pre_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """encoder""" , """attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (MLP).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowercase_ , lowercase_ , """encoder""" , lowercase_ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowercase = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """decoder""" , """self_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (Cross Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """decoder""" , """encoder_decoder_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 2 (MLP).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowercase_ , lowercase_ , """decoder""" , lowercase_ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old["""decoder/decoder_norm/scale"""]
lowercase = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase = old["""decoder/logits_dense/kernel"""].T
return new
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : bool ):
lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase = state_dict["""shared.weight"""]
return state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Any ):
lowercase = checkpoints.load_tax_checkpoint(lowercase_ )
lowercase = convert_tax_to_pytorch(lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ )
lowercase = make_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ , strict=lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : bool = False ):
lowercase = TaConfig.from_json_file(lowercase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase = TaEncoderModel(lowercase_ )
else:
lowercase = TaForConditionalGeneration(lowercase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_ )
print("""Done""" )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
lowercase_ : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
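# Hedged usage note (derived from the branches above, not part of the
# original sample): a multi-GPU run is expected to be launched with the
# distributed environment variables this initializer reads, roughly
#
#   WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 NODE_RANK=0 RANK=0 \
#       python train.py --n_gpu 4 --local_rank 0
#
# The script name and flag spellings are assumptions; the variable names
# (WORLD_SIZE, N_GPU_NODE, RANK, N_NODES, NODE_RANK) come straight from the
# os.environ lookups in the multi-GPU branch.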
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __UpperCamelCase :
__A = XGLMConfig
__A = {}
__A = '''gelu'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=14 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0.02 , ) -> Optional[Any]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_labels
lowercase = vocab_size
lowercase = d_model
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = ffn_dim
lowercase = activation_function
lowercase = activation_dropout
lowercase = attention_dropout
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = None
lowercase = 0
lowercase = 2
lowercase = 1
def _a ( self ) -> Dict:
'''simple docstring'''
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = self.get_config()
lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCAmelCase , )
def _a ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }

        return config, inputs_dict
@require_tf
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__A = (TFXGLMForCausalLM,) if is_tf_available() else ()
__A = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
__A = False
__A = False
__A = False
def _a ( self ) -> int:
'''simple docstring'''
lowercase = TFXGLMModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFXGLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _a ( self ) -> int:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __UpperCamelCase (unittest.TestCase ):
@slow
def _a ( self , _lowerCAmelCase=True ) -> Dict:
'''simple docstring'''
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        lowercase = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCAmelCase )
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
lowercase = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
lowercase = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and to ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
lowercase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase , seed=[7, 0] )
lowercase = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
lowercase = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowercase = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase = """left"""
# use different length sentences to test batching
lowercase = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
lowercase = tokenizer(_lowerCAmelCase , return_tensors="""tf""" , padding=_lowerCAmelCase )
lowercase = inputs["""input_ids"""]
lowercase = model.generate(input_ids=_lowerCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
lowercase = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowercase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 )
lowercase = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowercase = model.generate(input_ids=_lowerCAmelCase , max_new_tokens=12 )
lowercase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCAmelCase )
lowercase = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , [non_padded_sentence, padded_sentence] )
| 653 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    # Token-authenticated request against the GitHub REST v3 API.
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
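# Usage illustration (not executed here; requires a real personal access token).
# The authenticated-user endpoint returns a JSON object whose documented fields
# include "login" and "id":
#
#     info = fetch_github_info("<personal-access-token>")
#     print(info.get("login"), info.get("id"))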
| 653 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase_ : int = None
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ : List[Any] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase_ : Dict = {
'''camembert-base''': 512,
}
lowercase_ : Union[str, Any] = '''▁'''
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
__A = CamembertTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_lowerCAmelCase , ) -> Any:
'''simple docstring'''
lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = False if not self.vocab_file else True
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
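    # Layout sketch for the special tokens assembled above (CamemBERT follows the
    # RoBERTa scheme; token strings shown instead of ids):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s> </s> B </s>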
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
| 653 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
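# Quick illustration of the subsampling helper above (named random_subsample in
# the original example script): a 30 s clip at 16 kHz is cut to exactly
# max_length seconds.
#
#     import numpy as np
#     wav = np.random.randn(16_000 * 30)
#     clip = random_subsample(wav, max_length=5.0, sample_rate=16_000)
#     assert len(clip) == 16_000 * 5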
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , label2id=lowercase_ , id2label=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data) -> None:
        # Each slot is a deque, so multiple values can chain off one slot.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self) -> float:
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # The slot only counts as full once its chain reaches charge_factor
        # and every slot in the table is occupied.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
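# Usage sketch, assuming the HashTable base class from the sibling hash_table
# module (constructed as HashTable(size_table, charge_factor=..., lim_charge=...)
# with an insert_data method):
#
#     table = HashTableWithLinkedList(3, charge_factor=2)
#     for value in (10, 20, 30):
#         table.insert_data(value)
#     print(table.values)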
| 653 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(_lowerCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
lowercase = kwargs.pop("""device_idx""" , self.device_idx )
lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
lowercase = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
__A = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
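# Hypothetical usage sketch: in transformers this dataclass is
# TensorFlowBenchmarkArguments, and a deprecated `no_*` flag passed to __init__
# above is meant to be folded into its positive counterpart:
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#     print(args.eager_mode, args.use_xla)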
| 653 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=sys.maxsize ) -> Dict:
'''simple docstring'''
lowercase = """bilinear"""
lowercase = max_size
lowercase = short_edge_length
def __call__( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = []
for img in imgs:
lowercase , lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase = size * 1.0 / min(_lowerCAmelCase , _lowerCAmelCase )
if h < w:
lowercase , lowercase = size, scale * w
else:
lowercase , lowercase = scale * h, size
if max(_lowerCAmelCase , _lowerCAmelCase ) > self.max_size:
lowercase = self.max_size * 1.0 / max(_lowerCAmelCase , _lowerCAmelCase )
lowercase = newh * scale
lowercase = neww * scale
lowercase = int(neww + 0.5 )
lowercase = int(newh + 0.5 )
            if img.dtype == np.uint8:
lowercase = Image.fromarray(_lowerCAmelCase )
lowercase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase = np.asarray(_lowerCAmelCase )
else:
                lowercase = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
lowercase = nn.functional.interpolate(
_lowerCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_lowerCAmelCase ).squeeze(0 )
img_augs.append(_lowerCAmelCase )
return img_augs
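# Worked example of the short-edge arithmetic in __call__ above (illustrative
# numbers): h, w = 480, 640 with target size 300 gives scale = 300 / 480 =
# 0.625, hence (newh, neww) = (300, 400). max(300, 400) is below a typical
# max_size of 1333, so no second rescale is needed, and rounding with
# int(x + 0.5) yields a 400x300 output.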
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase = cfg.INPUT.FORMAT
lowercase = cfg.SIZE_DIVISIBILITY
lowercase = cfg.PAD_VALUE
lowercase = cfg.INPUT.MAX_SIZE_TEST
lowercase = cfg.MODEL.DEVICE
lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase = lambda _lowerCAmelCase : (x - self.pixel_mean) / self.pixel_std
def _a ( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = tuple(max(_lowerCAmelCase ) for s in zip(*[img.shape for img in images] ) )
lowercase = [im.shape[-2:] for im in images]
lowercase = [
nn.functional.pad(
_lowerCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_lowerCAmelCase , _lowerCAmelCase )
]
return torch.stack(_lowerCAmelCase ), torch.tensor(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> Dict:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = [images]
if single_image:
assert len(_lowerCAmelCase ) == 1
for i in range(len(_lowerCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_lowerCAmelCase , images.pop(_lowerCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_lowerCAmelCase , torch.as_tensor(img_tensorize(images.pop(_lowerCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase = torch.tensor([im.shape[:2] for im in images] )
lowercase = self.aug(_lowerCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase = [self.normalizer(_lowerCAmelCase ) for x in images]
# now pad them to do the following operations
lowercase , lowercase = self.pad(_lowerCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase = torch.true_divide(_lowerCAmelCase , _lowerCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Dict ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Tuple[int, int] ):
assert torch.isfinite(lowercase_ ).all(), "Box tensor contains infinite or NaN!"
lowercase , lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowercase_ )
tensor[:, 1].clamp_(min=0 , max=lowercase_ )
tensor[:, 2].clamp_(min=0 , max=lowercase_ )
tensor[:, 3].clamp_(min=0 , max=lowercase_ )
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
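# Minimal sketch (in transformers this configuration class is ViTMSNConfig):
# the defaults above reproduce the ViT-MSN base layout.
#
#     config = ViTMSNConfig()
#     assert (config.hidden_size, config.num_hidden_layers, config.patch_size) == (768, 12, 16)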
| 653 | 1 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=36 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1000 , ) -> Dict:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = patch_size
lowercase = text_seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = coordinate_size
lowercase = shape_size
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase = text_seq_length
lowercase = (image_size // patch_size) ** 2 + 1
lowercase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> int:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase = bbox[i, j, 3]
lowercase = bbox[i, j, 1]
lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase = bbox[i, j, 2]
lowercase = bbox[i, j, 0]
lowercase = t
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = LayoutLMvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# text + image
lowercase = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase )
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase = model(pixel_values=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = LayoutLMvaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = self.num_labels
lowercase = LayoutLMvaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs

        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }

        return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = False
__A = False
__A = False
__A = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return True
def _a ( self ) -> int:
'''simple docstring'''
lowercase = LayoutLMvaModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
lowercase = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
lowercase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_lowerCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
lowercase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in get_values(_lowerCAmelCase ):
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCAmelCase , )
return inputs_dict
def _a ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def _a ( self ) -> int:
'''simple docstring'''
lowercase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCAmelCase )
lowercase = torch.tensor([[1, 2]] )
lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowercase = model(
input_ids=input_ids.to(_lowerCAmelCase ) , bbox=bbox.to(_lowerCAmelCase ) , pixel_values=pixel_values.to(_lowerCAmelCase ) , )
# verify the logits
lowercase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
lowercase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 |
'''simple docstring'''
def apply_table(inp, table):
    """Permute the bits of ``inp`` according to the 1-indexed positions in ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """XOR two equal-length bit strings, returning the result as a bit string."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up ``data`` in S-box ``s``: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
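# Companion sketch (not part of the original script): assuming the tables above
# are in scope, the two Feistel rounds can be packaged as reusable helpers, and
# decryption is just encryption with the subkeys swapped.
#
#     def encrypt(message, key1, key2):
#         temp = apply_table(message, IP)
#         temp = function(expansion, s0, s1, key1, temp)
#         temp = temp[4:] + temp[:4]
#         temp = function(expansion, s0, s1, key2, temp)
#         return apply_table(temp, IP_inv)
#
#     def decrypt(cipher, key1, key2):
#         return encrypt(cipher, key2, key1)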
| 653 | 1 |
'''simple docstring'''
def solution() -> int:
    """Return the product d1 * d10 * d100 * ... * d1000000 of the digits of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


if __name__ == "__main__":
    print(solution())
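# Small sanity check of the indexing above (d1 is constant[0], d10 is
# constant[9], ...): the constant starts 1 2 3 4 5 6 7 8 9 1 0 1 1 ..., so
# d1 = 1 and d10 = 1.
#
#     digits = "".join(str(i) for i in range(1, 20))
#     assert digits.startswith("12345678910111213")
#     assert digits[0] == "1" and digits[9] == "1"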
| 653 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
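# Companion sketch for inspecting the timings JSON written above
# (RESULTS_FILE_PATH is the name the original benchmark script gives the path
# assembled near the top of this file). Only the "num examples" key is
# guaranteed by the code as written:
#
#     import json
#     with open(RESULTS_FILE_PATH) as f:
#         times = json.load(f)
#     print(times["num examples"])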
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Dict = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''rwkv'''
__A = {'''max_position_embeddings''': '''context_length'''}
def __init__( self , _lowerCAmelCase=5_0277 , _lowerCAmelCase=1024 , _lowerCAmelCase=4096 , _lowerCAmelCase=32 , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=6 , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
lowercase = vocab_size
lowercase = context_length
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase = layer_norm_epsilon
lowercase = rescale_every
lowercase = use_cache
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(
tie_word_embeddings=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
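# Sketch of the fallback defaults above (in transformers this class is
# RwkvConfig): unset attention/intermediate sizes are derived from hidden_size.
#
#     cfg = RwkvConfig(hidden_size=1024)
#     assert cfg.attention_hidden_size == 1024
#     assert cfg.intermediate_size == 4096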
| 653 |
"""K-means clustering implemented with TensorFlow 1.x graph-mode ops."""
from random import shuffle

import tensorflow as tf
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    """Groups the given vectors into noofclusters clusters and returns (centroids, assignments)."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        # tf.sub was renamed tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()  # replaces the removed initialize_all_variables

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
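# A minimal usage sketch on toy 2-D data (requires TensorFlow 1.x, where
# tf.Session and tf.placeholder are available):
#
#   data = [array([1.0, 1.0]), array([1.5, 1.2]), array([8.0, 8.0]), array([8.2, 7.9])]
#   centroids, assignments = TFKMeansCluster(data, 2)
#   print(centroids)     # two cluster centers
#   print(assignments)   # cluster index for each input vector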
| 653 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
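# A minimal invocation sketch (assumes this file is saved as test_metrics.py in an
# environment with accelerate and its test extras installed):
#
#   python -m pytest -q test_metrics.py -k cpu
#
# The GPU variants are skipped automatically on machines without the required devices.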
| 653 |
"""Find the area of the largest all-ones square in a binary matrix, four ways."""


def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion; tracks the best square seen so far in a one-element list."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with dp_array so each cell is solved once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current row and the row below it."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Hand the finished row over and start the next one fresh; aliasing the
        # two lists here would corrupt the diagonal lookups on the next pass.
        next_row = current_row
        current_row = [0] * (cols + 1)

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
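# A quick cross-check that all four implementations agree (names as defined above):
#
#   mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
#   results = {
#       largest_square_area_in_matrix_top_down_approach(3, 3, mat),
#       largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat),
#       largest_square_area_in_matrix_bottom_up(3, 3, mat),
#       largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat),
#   }
#   assert results == {2}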
| 653 | 1 |
"""A Vigenère-style cipher that subtracts the key on encryption and adds it back on decryption."""
from string import ascii_uppercase

dicta = {char: i for i, char in enumerate(ascii_uppercase)}  # letter -> index
dictb = dict(enumerate(ascii_uppercase))  # index -> letter


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message; spaces pass through unchanged and do not consume key letters."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt ciphertext produced by cipher_text by adding the key shift back."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
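# A round-trip sanity check using the functions defined above:
#
#   msg = "ATTACK AT DAWN"
#   k = generate_key(msg, "LEMON")
#   assert original_text(cipher_text(msg, k), k) == msg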
| 653 |
"""GPTBigCode model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
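# A minimal usage sketch (a small illustrative configuration, not the santacoder one):
#
#   config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8)
#   assert config.hidden_size == 256   # resolved through attribute_map
#   assert config.multi_query          # multi-query attention is on by default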
| 653 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics.

    Args:
    - split: one of train, val, test
    - metrics: metrics dict
    - output_dir: where to save the metrics
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
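# A minimal launch sketch (assumes this script is saved as finetune_trainer.py and
# that ./xsum holds the expected source/target files; both names are illustrative):
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum \
#       --output_dir ./output \
#       --do_train --do_eval \
#       --task summarization \
#       --predict_with_generate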
| 653 |
"""Send a message to a Slack channel through an incoming webhook."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
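# A minimal usage sketch (SLACK_WEBHOOK_URL is an illustrative environment variable
# name; use the URL Slack generates for your channel):
#
#   import os
#   send_slack_message("Deploy finished", os.environ["SLACK_WEBHOOK_URL"])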
| 653 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ : Any = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = AlbertTokenizer
__A = AlbertTokenizerFast
__A = True
__A = True
__A = True
def _a ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = AlbertTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self , _lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = """this is a test"""
lowercase = """this is a test"""
return input_text, output_text
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = """<pad>"""
lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(_lowerCAmelCase ) , 3_0000 )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def _a ( self ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = """I was born in 92000, and this is falsé."""
lowercase = tokenizer.tokenize(_lowerCAmelCase )
lowercase = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
lowercase = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(_lowerCAmelCase )
lowercase = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = AlbertTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
lowercase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [48, 25, 21, 1289] )
lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = AlbertTokenizer(_lowerCAmelCase )
lowercase = tokenizer.encode("""sequence builders""" )
lowercase = tokenizer.encode("""multi-sequence build""" )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
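# A minimal invocation sketch (assumes this file is saved as
# test_tokenization_albert.py inside the transformers test tree):
#
#   python -m pytest -q tests/models/albert/test_tokenization_albert.py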
| 653 |
"""Convert MobileViT checkpoints from the ml-cvnets library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            # Split the fused qkv tensor into separate query/key/value entries.
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
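# A minimal conversion sketch (assumes this script is saved as
# convert_mlcvnets_to_pytorch.py; the checkpoint path is hypothetical and the .pt
# file comes from Apple's ml-cvnets release):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small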
| 653 | 1 |
"""Deprecated feature-extractor alias for the CLIP image processor."""
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
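# Migration sketch: new code should construct the processor class directly, e.g.
#
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#
# Loading CLIPFeatureExtractor still works but emits the FutureWarning above.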
| 653 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
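# A minimal invocation sketch (assumes this file is saved as
# test_modeling_swiftformer.py inside the transformers test tree; the slow
# integration test needs RUN_SLOW=1 and network access):
#
#   RUN_SLOW=1 python -m pytest -q tests/models/swiftformer/test_modeling_swiftformer.py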
| 653 | 1 |