| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
"""Utilities for processing the HANS (Heuristic Analysis for NLI Systems) dataset."""

import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: Optional[int],
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample``s into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
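A minimal usage sketch, not part of the original file: building the PyTorch HANS dataset with an MNLI-style tokenizer. The checkpoint name and the data directory are placeholders; the directory is assumed to contain the HANS heuristics_train_set.txt / heuristics_evaluation_set.txt files.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # hypothetical checkpoint
dataset = HansDataset(
    data_dir="./hans",  # hypothetical directory containing the HANS tsv files
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
    evaluate=False,
)
print(len(dataset), dataset.get_labels())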
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
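A sketch of what the lazy module buys, assuming transformers is installed (and torch for the model class): importing the package is cheap, and the heavy submodules are only materialized on first attribute access.

import transformers.models.m2m_100 as m2m

config = m2m.M2M100Config()  # triggers import of configuration_m2m_100 only
model_cls = m2m.M2M100ForConditionalGeneration  # imports modeling_m2m_100 on first access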
''' MAUVE metric. '''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # delegate to the reference implementation in the `mauve-text` package
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
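A small usage variant, assuming a hypothetical NEWSAPI_KEY environment variable: reading the key from the environment avoids hardcoding it in source.

import os

fetch_bbc_news(bbc_news_api_key=os.environ["NEWSAPI_KEY"])  # NEWSAPI_KEY is a hypothetical variable name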
"""
Project Euler Problem 77: find the first value which can be written as the
sum of primes in over five thousand different ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of unique products of primes; each product encodes one way of
    writing number_to_partition as a sum of primes.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest number that can be written as a sum of primes in more
    than number_unique_partitions ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
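A worked check of the encoding trick: because every multiset of primes has a unique product, len(partition(n)) counts the prime partitions of n. The problem statement's own example is n = 10, which has exactly five such partitions, and the published answer to Problem 77 is 71.

assert len(partition(10)) == 5  # 10 = 7+3 = 5+5 = 5+3+2 = 3+3+2+2 = 2+2+2+2+2
assert solution() == 71         # first value with more than five thousand prime partitions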
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: report whether pattern occurs in text
    in O(n + m) time.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
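One more worked failure array, for the Test 2 pattern: each entry is the length of the longest proper prefix of the pattern that is also a suffix ending at that position.

assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]  # "AB" repeats, "X" resets the match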
"""Benchmarking the library on inference and training in TensorFlow."""

import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")

        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using
    Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
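A worked trace of the key step: each n & (n - 1) clears the lowest set bit, so the loop runs once per set bit rather than once per bit position.

# 12 (0b1100) -> 8 (0b1000) -> 0, two iterations in total
assert get_set_bits_count(12) == 2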
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase__ = logging.getLogger(__name__)
lowercase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowercase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = field(
default=lowercase, metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """The input training data file (a text file)."""} )
lowerCamelCase__ = field(
default=lowercase, metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Whether ot not to use whole word mask."""} )
lowerCamelCase__ = field(
default=0.15, metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowerCamelCase__ = field(
default=1 / 6, metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
}, )
lowerCamelCase__ = field(
default=5, metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
lowerCamelCase__ = field(
default=-1, metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
}, )
lowerCamelCase__ = field(
default=lowercase, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _snake_case ( lowercase__ , lowercase__ , lowercase__ = False , lowercase__ = None , ):
def _dataset(lowercase__ , lowercase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=lowercase__ , file_path=lowercase__ , block_size=args.block_size , ref_path=lowercase__ , )
return LineByLineTextDataset(tokenizer=lowercase__ , file_path=lowercase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase__ , file_path=lowercase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowercase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowercase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCamelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_lowerCamelCase : Any = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
_lowerCamelCase : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
_lowerCamelCase : int = AutoModelWithLMHead.from_config(lowercase__ )
model.resize_token_embeddings(len(lowercase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
_lowerCamelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_lowerCamelCase : Tuple = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_lowerCamelCase : Optional[Any] = (
get_dataset(lowercase__ , tokenizer=lowercase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_lowerCamelCase : Tuple = (
get_dataset(lowercase__ , tokenizer=lowercase__ , evaluate=lowercase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_lowerCamelCase : List[str] = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_lowerCamelCase : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
else:
_lowerCamelCase : Dict = DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_lowerCamelCase : str = Trainer(
model=lowercase__ , args=lowercase__ , data_collator=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , prediction_loss_only=lowercase__ , )
# Training
if training_args.do_train:
_lowerCamelCase : Optional[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCamelCase : Optional[int] = trainer.evaluate()
_lowerCamelCase : int = math.exp(eval_output['eval_loss'] )
_lowerCamelCase : str = {'perplexity': perplexity}
_lowerCamelCase : str = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , lowercase__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(lowercase__ )
return results
def _snake_case ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 96 |
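A launch sketch, with hypothetical file paths: the script is driven by HfArgumentParser, so a programmatic run can populate sys.argv before calling main(); the same flags work verbatim from the shell.

import sys

sys.argv = [
    "run_language_modeling.py",
    "--model_name_or_path", "gpt2",   # hypothetical checkpoint
    "--output_dir", "out",            # hypothetical output directory
    "--do_eval",
    "--eval_data_file", "wiki.valid.txt",  # hypothetical text file
]
main()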
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowercase__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowercase__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = "{}-{}".format(id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _snake_case ( _snake_case : List[Any]=None , _snake_case : Optional[int]=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_snake_case )
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCAmelCase : Optional[List[str]] = list_field(
default=__lowerCAmelCase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _snake_case ( _snake_case : Optional[Any] ) -> List[str]:
'''simple docstring'''
try:
int(_snake_case )
return True
except ValueError:
return False
def _snake_case ( _snake_case : Dict ) -> str:
'''simple docstring'''
try:
float(_snake_case )
return True
except ValueError:
return False
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ):
_A = args
_A = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
_A = csv.DictReader(_UpperCAmelCase )
for row in reader:
_A = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
_A = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
_A = float(row['result'] )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A , _A = plt.subplots()
_A = 'Time usage' if self.args.is_time else 'Memory usage'
_A = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_A = sorted(set(self.result_dict[model_name]['bsz'] ) )
_A = sorted(set(self.result_dict[model_name]['seq_len'] ) )
_A = self.result_dict[model_name]['result']
((_A) , (_A)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_A = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_A = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_UpperCAmelCase , )
else:
_A = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_A) , (_A)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
_A = np.asarray(_UpperCAmelCase , _UpperCAmelCase )[: len(_UpperCAmelCase )]
plt.scatter(
_UpperCAmelCase , _UpperCAmelCase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_UpperCAmelCase , _UpperCAmelCase , '--' )
title_str += F''' {label_model_name} vs.'''
_A = title_str[:-4]
_A = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(_UpperCAmelCase )
plt.xlabel(_UpperCAmelCase )
plt.ylabel(_UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _snake_case ( ) -> Any:
'''simple docstring'''
_A = HfArgumentParser(_snake_case )
_A = parser.parse_args_into_dataclasses()[0]
_A = Plot(args=_snake_case )
plot.plot()
if __name__ == "__main__":
main()
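A minimal sketch with hypothetical numbers: write a results csv with the columns the Plot class expects (model, batch_size, sequence_length, result), then render it.

import csv

rows = [
    {"model": "bert-base", "batch_size": 8, "sequence_length": 32, "result": 512},
    {"model": "bert-base", "batch_size": 8, "sequence_length": 128, "result": 1024},
]
with open("results.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)

plot = Plot(args=PlotArguments(csv_file="results.csv"))
plot.plot()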
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
UpperCAmelCase : List[Any] = ['''accelerate''', '''launch''']
UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
UpperCAmelCase : Union[str, Any] = '''default_config.yaml'''
UpperCAmelCase : Union[str, Any] = config_folder / config_file
UpperCAmelCase : Union[str, Any] = config_folder / '''_default_config.yaml'''
UpperCAmelCase : List[Any] = Path('''tests/test_configs''' )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Optional[int] ):
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Any ):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Dict = '''test-tpu'''
UpperCAmelCase : Optional[int] = '''us-central1-a'''
UpperCAmelCase : List[str] = '''ls'''
UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
UpperCAmelCase : Optional[Any] = '''cd /usr/share'''
UpperCAmelCase : Optional[Any] = '''tests/test_samples/test_command_file.sh'''
UpperCAmelCase : str = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCAmelCase_ ( self : Any ):
_A = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Dict ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[str] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : int ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
| 271 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
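# Illustrative usage sketch, added for clarity and not part of the original file. It assumes
# the config class above is the one exported from transformers as `MaskFormerSwinConfig`.
from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_size)  # 768, i.e. embed_dim * 2 ** (len(depths) - 1)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']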
| 41 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # The label is everything before the trailing "_<index>.jpg" in the file name.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class A_ ( A__ ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase_ :int , lowerCamelCase_ :Any=None , lowerCamelCase_ :Any=None ):
"""simple docstring"""
lowerCamelCase__ : Tuple =file_names
lowerCamelCase__ : str =image_transform
lowerCamelCase__ : str =label_to_id
def __len__( self :Optional[int] ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self :Optional[Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self.file_names[idx]
lowerCamelCase__ : Dict =PIL.Image.open(lowerCamelCase_ )
lowerCamelCase__ : Dict =raw_image.convert('RGB' )
if self.image_transform is not None:
lowerCamelCase__ : int =self.image_transform(lowerCamelCase_ )
lowerCamelCase__ : List[str] =extract_label(lowerCamelCase_ )
if self.label_to_id is not None:
lowerCamelCase__ : Optional[int] =self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config: dict, args) -> None:
# Initialize accelerator
if args.with_tracking:
lowerCamelCase__ : List[str] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCamelCase__ : Tuple =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
image_size = config["image_size"]
if not isinstance(image_size, (list, tuple)):
    image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCamelCase__ : Optional[Any] =args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCamelCase__ : Union[str, Any] =int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCamelCase__ : int =None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCamelCase__ : Tuple =os.path.split(snake_case_ )[-1].split('.' )[0]
accelerator.init_trackers(snake_case_ , snake_case_ )
# Grab all the image filenames
lowerCamelCase__ : List[str] =[os.path.join(args.data_dir , snake_case_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCamelCase__ : str =[extract_label(snake_case_ ) for fname in file_names]
lowerCamelCase__ : Any =list(set(snake_case_ ) )
id_to_label.sort()
lowerCamelCase__ : List[Any] ={lbl: i for i, lbl in enumerate(snake_case_ )}
# Set the seed before splitting the data.
np.random.seed(snake_case_ )
torch.manual_seed(snake_case_ )
torch.cuda.manual_seed_all(snake_case_ )
# Split our filenames between train and validation
lowerCamelCase__ : int =np.random.permutation(len(snake_case_ ) )
lowerCamelCase__ : Tuple =int(0.8 * len(snake_case_ ) )
lowerCamelCase__ : str =random_perm[:cut]
lowerCamelCase__ : Dict =random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCamelCase__ : str =Compose([RandomResizedCrop(snake_case_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCamelCase__ : Any =PetsDataset(
[file_names[i] for i in train_split] , image_transform=snake_case_ , label_to_id=snake_case_ )
# For evaluation, we use a deterministic Resize
lowerCamelCase__ : Optional[int] =Compose([Resize(snake_case_ ), ToTensor()] )
lowerCamelCase__ : Dict =PetsDataset([file_names[i] for i in eval_split] , image_transform=snake_case_ , label_to_id=snake_case_ )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[Any] =DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
lowerCamelCase__ : int =DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Dict =create_model('resnet50d' , pretrained=snake_case_ , num_classes=len(snake_case_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : str =model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
    param.requires_grad = False
for param in model.get_classifier().parameters():
    param.requires_grad = True
# We normalize the batches of images to be a bit faster.
lowerCamelCase__ : Any =torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCamelCase__ : Dict =torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : int =torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
# Instantiate learning rate scheduler
lowerCamelCase__ : Dict =OneCycleLR(optimizer=snake_case_ , max_lr=snake_case_ , epochs=snake_case_ , steps_per_epoch=len(snake_case_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase__ : int =0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase__ : Optional[int] =0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase__ : int =os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCamelCase__ : int =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCamelCase__ : Optional[int] =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCamelCase__ : Tuple =os.path.splitext(snake_case_ )[0]
if "epoch" in training_difference:
lowerCamelCase__ : Union[str, Any] =int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCamelCase__ : Optional[int] =None
else:
lowerCamelCase__ : List[Any] =int(training_difference.replace('step_' , '' ) )
lowerCamelCase__ : int =resume_step // len(snake_case_ )
resume_step -= starting_epoch * len(snake_case_ )
# Now we train the model
for epoch in range(snake_case_ , snake_case_ ):
model.train()
if args.with_tracking:
lowerCamelCase__ : str =0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCamelCase__ : Tuple =accelerator.skip_first_batches(snake_case_ , snake_case_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCamelCase__ : str =train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : int ={k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : List[str] =(batch['image'] - mean) / std
lowerCamelCase__ : Any =model(snake_case_ )
lowerCamelCase__ : List[Any] =torch.nn.functional.cross_entropy(snake_case_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(snake_case_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(snake_case_ , snake_case_ ):
lowerCamelCase__ : int =f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCamelCase__ : List[Any] =os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
model.eval()
lowerCamelCase__ : int =0
lowerCamelCase__ : Optional[Any] =0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : Union[str, Any] ={k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : int =(batch['image'] - mean) / std
with torch.no_grad():
lowerCamelCase__ : List[str] =model(snake_case_ )
lowerCamelCase__ : Optional[int] =outputs.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCamelCase__ : Union[str, Any] =predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCamelCase__ : List[str] =accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {1_0_0 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_0_0 * eval_metric,
'train_loss': total_loss.item() / len(snake_case_ ),
'epoch': epoch,
} , step=snake_case_ , )
if checkpointing_steps == "epoch":
lowerCamelCase__ : Tuple =f"""epoch_{epoch}"""
if args.output_dir is not None:
lowerCamelCase__ : Tuple =os.path.join(args.output_dir , snake_case_ )
accelerator.save_state(snake_case_ )
if args.with_tracking:
accelerator.end_training()
def main() -> None:
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument('--data_dir' , required=snake_case_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=snake_case_ , default=snake_case_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=snake_case_ , default=snake_case_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=snake_case_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=snake_case_ , default=snake_case_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=snake_case_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
args = parser.parse_args()
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(config, args)
if __name__ == "__main__":
main() | 126 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_a : int = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = 0.9 , a__ = PILImageResampling.BICUBIC , a__ = True , a__ = None , a__ = 1 / 255 , a__ = True , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Any = size if size is not None else {"""shortest_edge""": 224}
_lowerCAmelCase : Optional[int] = get_size_dict(a__ , default_to_square=a__ )
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Union[str, Any] = get_size_dict(a__ , param_name="""crop_size""" )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : Tuple = size
_lowerCAmelCase : Union[str, Any] = crop_pct
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : str = do_center_crop
_lowerCAmelCase : Optional[int] = crop_size
_lowerCAmelCase : Any = do_rescale
_lowerCAmelCase : Dict = rescale_factor
_lowerCAmelCase : Optional[Any] = do_normalize
_lowerCAmelCase : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __A ( self , a__ , a__ , a__ = None , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
_lowerCAmelCase : Any = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
_lowerCAmelCase : List[Any] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_lowerCAmelCase : str = int(size["""height"""] / crop_pct )
else:
_lowerCAmelCase : Dict = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(a__ ) )
_lowerCAmelCase : str = get_resize_output_image_size(a__ , size=a__ , default_to_square=a__ )
else:
if "shortest_edge" in size:
_lowerCAmelCase : Optional[int] = get_resize_output_image_size(a__ , size=size["""shortest_edge"""] , default_to_square=a__ )
elif "height" in size and "width" in size:
_lowerCAmelCase : str = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(a__ ) )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : str = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size["""height"""], size["""width"""]) , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Tuple = crop_pct if crop_pct is not None else self.crop_pct
_lowerCAmelCase : str = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Tuple = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[int] = size if size is not None else self.size
_lowerCAmelCase : Union[str, Any] = get_size_dict(a__ , default_to_square=a__ )
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Any = get_size_dict(a__ , param_name="""crop_size""" )
_lowerCAmelCase : List[str] = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : List[str] = [to_numpy_array(a__ ) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=a__ , size=a__ , crop_pct=a__ , resample=a__ ) for image in images]
if do_center_crop:
_lowerCAmelCase : List[Any] = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
_lowerCAmelCase : Union[str, Any] = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_lowerCAmelCase : List[Any] = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : int = {"""pixel_values""": images}
return BatchFeature(data=a__ , tensor_type=a__ )
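# Illustrative usage sketch, added for clarity and not part of the original file. The class
# above appears to mirror transformers' PoolFormerImageProcessor, so the same crop_pct
# preprocessing can be exercised like this (the shape assumes the default 224x224 crop_size):
import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
dummy = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
batch = processor(images=dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)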
| 126 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 126 | 1 |
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1 (logical OR), else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 334 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple ,snake_case : Optional[int] ,snake_case : Dict=13 ,snake_case : str=7 ,snake_case : Dict=True ,snake_case : List[Any]=True ,snake_case : Dict=False ,snake_case : int=True ,snake_case : Dict=99 ,snake_case : int=32 ,snake_case : List[str]=5 ,snake_case : Optional[Any]=4 ,snake_case : Tuple=64 ,snake_case : List[Any]="gelu" ,snake_case : str=0.1 ,snake_case : str=0.1 ,snake_case : List[str]=512 ,snake_case : List[str]=16 ,snake_case : str=2 ,snake_case : Dict=0.02 ,snake_case : Optional[int]=3 ,snake_case : int=4 ,snake_case : Any=None ,snake_case : Union[str, Any]=2 ,snake_case : List[Any]=2 ,snake_case : Optional[int]=2 ,snake_case : Dict=2 ,snake_case : List[str]=4 ,snake_case : int=1 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =q_groups
SCREAMING_SNAKE_CASE =k_groups
SCREAMING_SNAKE_CASE =v_groups
SCREAMING_SNAKE_CASE =post_attention_groups
SCREAMING_SNAKE_CASE =intermediate_groups
SCREAMING_SNAKE_CASE =output_groups
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : str ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : int ,snake_case : Any ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =SqueezeBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : List[str] ,snake_case : List[Any] ,snake_case : Dict ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =SqueezeBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,start_positions=snake_case ,end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Any ,snake_case : Tuple ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Tuple ,snake_case : Dict ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =SqueezeBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Dict ,snake_case : str ,snake_case : Union[str, Any] ,snake_case : Union[str, Any] ,snake_case : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.num_choices
SCREAMING_SNAKE_CASE =SqueezeBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE =model(
snake_case ,attention_mask=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,dim=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case )
@slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
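# Illustrative sketch, added for clarity and not part of the original tests: the same MNLI
# checkpoint driven through its tokenizer instead of hand-built input ids.
from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
mnli_model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
inputs = tokenizer("A soccer game is happening.", "People are playing a sport.", return_tensors="pt")
logits = mnli_model(**inputs).logits  # three MNLI class logits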
| 334 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase_ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
__lowerCamelCase : Any = self.diffusers_dir
shutil.copy(
os.path.join(a , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : str = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def _snake_case ( self: List[Any] , a: Any , a: Dict , a: Any , a: Dict=None ):
__lowerCamelCase : List[Any] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
__lowerCamelCase : Union[str, Any] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
__lowerCamelCase : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
__lowerCamelCase : Optional[int] = black.format_str(a , mode=a )
__lowerCamelCase : str = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(a , 'w' , newline='\n' ) as f:
f.write(a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=a )
with open(a , 'r' ) as f:
self.assertTrue(f.read() , a )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : List[Any] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(a , a )
def _snake_case ( self: Tuple ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , a , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , a ) , )
# Copy consistency with a really long name
__lowerCamelCase : str = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('Bert' , a , a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , a , overwrite_result=re.sub('DDPM' , 'Test' , a ) , )
| 194 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    # Cap the parquet row-group size for media/binary columns so row groups stay readable.
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Tuple , a: NestedDataStructureLike[PathLike] , a: Optional[NamedSplit] = None , a: Optional[Features] = None , a: str = None , a: bool = False , a: bool = False , a: Optional[int] = None , **a: Optional[Any] , ):
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
__lowerCamelCase : List[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
__lowerCamelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
__lowerCamelCase : List[str] = Parquet(
cache_dir=a , data_files=a , features=a , hash=a , **a , )
def _snake_case ( self: List[str] ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase : str = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : int = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
__lowerCamelCase : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Dataset , a: Union[PathLike, BinaryIO] , a: Optional[int] = None , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = dataset
__lowerCamelCase : List[Any] = path_or_buf
__lowerCamelCase : List[str] = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase : List[Any] = parquet_writer_kwargs
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__lowerCamelCase : Optional[int] = self._write(file_obj=a , batch_size=a , **self.parquet_writer_kwargs )
else:
__lowerCamelCase : Any = self._write(file_obj=self.path_or_buf , batch_size=a , **self.parquet_writer_kwargs )
return written
def _snake_case ( self: Optional[int] , a: BinaryIO , a: int , **a: str ):
__lowerCamelCase : Dict = 0
__lowerCamelCase : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , a )
__lowerCamelCase : str = self.dataset.features.arrow_schema
__lowerCamelCase : Any = pq.ParquetWriter(a , schema=a , **a )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__lowerCamelCase : Any = query_table(
table=self.dataset._data , key=slice(a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(a )
written += batch.nbytes
writer.close()
return written
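# Illustrative usage sketch, added for clarity and not part of the original file. The reader
# and writer above back the public Dataset.from_parquet / Dataset.to_parquet helpers:
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"]})
ds.to_parquet("tiny.parquet")                         # goes through the parquet writer above
round_tripped = Dataset.from_parquet("tiny.parquet")  # goes through the parquet reader above
print(round_tripped[0])  # {'text': 'a'}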
| 194 | 1 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum of the even-valued Fibonacci terms not exceeding n (Project Euler #2)."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
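    # Quick worked check, added for illustration: the even-valued Fibonacci terms not
    # exceeding 100 are 2, 8 and 34, so their sum is 44.
    assert solution(100) == 2 + 8 + 34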
| 234 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for _ in range(0, 4 * size)]
        self.flag = [0 for _ in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # Push any pending lazy value down before touching this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # Push any pending lazy value down before reading this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 265 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 192 | import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
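# Illustrative usage sketch, added for clarity and not part of the original file. The path
# and the input name are placeholders; a real session needs an ONNX graph exported beforehand.
onnx_model = OnnxRuntimeModel.from_pretrained("path/to/exported_model")  # hypothetical local dir
outputs = onnx_model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))  # kwargs must match the graph's input names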
| 192 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 131 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
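# Illustrative usage sketch, added for clarity and not part of the original file. The pipeline
# is wired from freshly initialized components, so the output is noise; with a trained
# checkpoint one would use KarrasVePipeline.from_pretrained(...) instead.
from diffusers import UNet2DModel
from diffusers.schedulers import KarrasVeScheduler

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
image = pipe(batch_size=1, num_inference_steps=10).images[0]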
| 271 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy 2-approximation: build a vertex cover from a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples for every edge in the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
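# Sanity-check sketch: the matching-based cover is at most twice the optimum, and
# every edge must have at least one endpoint in the returned set.
#
#     graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
#     cover = matching_min_vertex_cover(graph)
#     assert all(u in cover or v in cover for u, v in get_edges(graph))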
| 84 |
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
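# A minimal usage sketch. Hedged assumption: "google/ddpm-cifar10-32" is a DDPM
# checkpoint whose scheduler config this pipeline converts to DDIM on load; any
# unconditional UNet checkpoint with a convertible scheduler should behave the same.
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")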
| 84 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A_ ( A__ ):
"""simple docstring"""
def __init__( self :Optional[int] , lowerCamelCase_ :pyspark.sql.DataFrame , lowerCamelCase_ :Optional[NamedSplit] = None , lowerCamelCase_ :Optional[Features] = None , lowerCamelCase_ :bool = True , lowerCamelCase_ :str = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :str = None , lowerCamelCase_ :bool = True , lowerCamelCase_ :str = "arrow" , **lowerCamelCase_ :Optional[int] , ):
"""simple docstring"""
super().__init__(
split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCamelCase__ : Union[str, Any] =load_from_cache_file
lowerCamelCase__ : Tuple =file_format
lowerCamelCase__ : List[str] =Spark(
df=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , working_dir=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCamelCase__ : Union[str, Any] =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCamelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split ) | 126 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """beit"""
def __init__( self :Union[str, Any] , lowerCamelCase_ :Union[str, Any]=8_192 , lowerCamelCase_ :Dict=768 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :str=3_072 , lowerCamelCase_ :List[str]="gelu" , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.02 , lowerCamelCase_ :int=1e-12 , lowerCamelCase_ :str=224 , lowerCamelCase_ :List[str]=16 , lowerCamelCase_ :Union[str, Any]=3 , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :str=False , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=True , lowerCamelCase_ :Tuple=[3, 5, 7, 11] , lowerCamelCase_ :str=[1, 2, 3, 6] , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Any=0.4 , lowerCamelCase_ :List[str]=256 , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :int=255 , **lowerCamelCase_ :List[str] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Any =vocab_size
lowerCamelCase__ : List[str] =hidden_size
lowerCamelCase__ : List[Any] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : List[str] =intermediate_size
lowerCamelCase__ : List[Any] =hidden_act
lowerCamelCase__ : Tuple =hidden_dropout_prob
lowerCamelCase__ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] =initializer_range
lowerCamelCase__ : List[str] =layer_norm_eps
lowerCamelCase__ : Dict =image_size
lowerCamelCase__ : int =patch_size
lowerCamelCase__ : Union[str, Any] =num_channels
lowerCamelCase__ : Tuple =use_mask_token
lowerCamelCase__ : Any =use_absolute_position_embeddings
lowerCamelCase__ : str =use_relative_position_bias
lowerCamelCase__ : int =use_shared_relative_position_bias
lowerCamelCase__ : Optional[int] =layer_scale_init_value
lowerCamelCase__ : Any =drop_path_rate
lowerCamelCase__ : Dict =use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCamelCase__ : List[str] =out_indices
lowerCamelCase__ : List[Any] =pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCamelCase__ : Tuple =use_auxiliary_head
lowerCamelCase__ : List[str] =auxiliary_loss_weight
lowerCamelCase__ : Tuple =auxiliary_channels
lowerCamelCase__ : Union[str, Any] =auxiliary_num_convs
lowerCamelCase__ : Optional[Any] =auxiliary_concat_input
lowerCamelCase__ : Optional[int] =semantic_loss_ignore_index
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = version.parse("""1.11""" )
@property
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
return 1e-4 | 126 | 1 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin within this benchmark script
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin within this benchmark script
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
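# `get_duration` and `generate_example_dataset` come from the local benchmarks
# `utils` module, which is not shown here. As an assumption-labeled stand-in, a
# decorator with the behavior this script relies on (returning the wall-clock time
# of one call) could look like:
#
#     import functools
#     import time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.perf_counter()
#             func(*args, **kwargs)
#             return time.perf_counter() - start
#         return wrapper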
| 221 |
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return -1 if the number has an odd count of prime factors (with multiplicity),
    and 1 otherwise, i.e. the Liouville lambda function (-1) ** Omega(n).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
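# Example values (the function name above is a descriptive restoration; the
# behavior follows directly from the code):
#
#     liouville_lambda(10)  # 1, since 10 = 2 * 5 has two prime factors
#     liouville_lambda(11)  # -1, since 11 is prime
#     liouville_lambda(12)  # -1, since 12 = 2 * 2 * 3 has three prime factors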
| 221 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case ) -> set:
"""simple docstring"""
_UpperCamelCase = set()
# edges = list of graph's edges
_UpperCamelCase = get_edges(__snake_case )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase , _UpperCamelCase = edges.pop()
chosen_vertices.add(__snake_case )
chosen_vertices.add(__snake_case )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__snake_case )
return chosen_vertices
def lowerCamelCase__ ( __snake_case ) -> set:
"""simple docstring"""
_UpperCamelCase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
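# The TYPE_CHECKING/_LazyModule pattern above defers heavy imports until an
# attribute is first accessed. A simplified sketch of the idea (the real
# _LazyModule in transformers also handles module specs, dir(), and error
# reporting):
#
#     import importlib
#     import types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported attribute to the submodule that defines it
#             self._attr_to_module = {
#                 attr: module for module, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)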
| 194 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 90 |
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    "doc": (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
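# How this script is typically invoked (sketch; assumes it lives at
# utils/release.py and is run from the repository root):
#
#     python utils/release.py                 # prepare a minor release
#     python utils/release.py --patch         # prepare a patch release
#     python utils/release.py --post_release  # bump back to the next .dev0 version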
| 90 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = 'dpr'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
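# A short usage sketch: the defaults mirror BERT-base, and (judging from the DPR
# modeling code) projection_dim=0 means the encoder's pooled output is used
# directly, while a positive value adds a projection layer. Values below are
# illustrative, not recommendations.
#
#     config = DPRConfig(projection_dim=128)
#     print(config.hidden_size)  # 768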
| 192 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__ ( self ):
_lowercase : Any = 1
_lowercase : str = 3
_lowercase : Any = (32, 32)
_lowercase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : int = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_06 ,)
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def lowerCamelCase__ ( self ):
def extract(*UpperCAmelCase_ ,**UpperCAmelCase_ ):
class UpperCamelCase :
"""simple docstring"""
def __init__( self ):
_lowercase : Dict = torch.ones([0] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def lowerCamelCase__ ( self ):
_lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.dummy_cond_unet
_lowercase : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
_lowercase : str = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowercase : List[str] = 77
_lowercase : List[str] = self.dummy_image.to(lowerCAmelCase__ )
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : str = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
_lowercase : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
_lowercase : Tuple = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : Dict = "A painting of a squirrel eating a burger"
_lowercase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_lowercase : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,)
_lowercase : Tuple = output.images
_lowercase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_lowercase : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,return_dict=lowerCAmelCase__ ,)[0]
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : List[Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.dummy_cond_unet
_lowercase : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
_lowercase : List[Any] = self.dummy_vae
_lowercase : Tuple = self.dummy_text_encoder
_lowercase : str = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowercase : List[Any] = 77
_lowercase : Tuple = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
_lowercase : Optional[int] = unet.half()
_lowercase : int = vae.half()
_lowercase : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : Dict = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
_lowercase : str = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : List[Any] = "A painting of a squirrel eating a burger"
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : Union[str, Any] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def lowerCamelCase__ ( self ):
_lowercase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : str = init_image.resize((7_60, 5_04) )
_lowercase : Dict = "BAAI/AltDiffusion"
_lowercase : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_lowercase : str = "A fantasy landscape, trending on artstation"
_lowercase : str = torch.manual_seed(0 )
_lowercase : Any = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="""np""" ,)
_lowercase : Optional[Any] = output.images[0]
_lowercase : List[str] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowercase : Dict = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
_lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowercase : Optional[int] = init_image.resize((7_68, 5_12) )
_lowercase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_lowercase : List[str] = "BAAI/AltDiffusion"
_lowercase : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_lowercase : Union[str, Any] = "A fantasy landscape, trending on artstation"
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : str = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="""np""" ,)
_lowercase : str = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
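# Running these tests locally (sketch; the file path is assumed from the diffusers
# test layout): the fast tests run on CPU with plain pytest, while the @slow /
# @require_torch_gpu integration tests are skipped unless RUN_SLOW is set and a
# CUDA device is available.
#
#     python -m pytest tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
#     RUN_SLOW=1 python -m pytest tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py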
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__UpperCAmelCase = 2_99_79_24_58
# Symbols
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = symbols('ct x y z')
def _snake_case ( lowercase__ : float ) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def _snake_case ( lowercase__ : float ) -> float:
'''simple docstring'''
return 1 / sqrt(1 - beta(lowercase__ ) ** 2 )
def _snake_case ( lowercase__ : float ) -> np.ndarray:
'''simple docstring'''
return np.array(
[
[gamma(lowercase__ ), -gamma(lowercase__ ) * beta(lowercase__ ), 0, 0],
[-gamma(lowercase__ ) * beta(lowercase__ ), gamma(lowercase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _snake_case ( lowercase__ : float , lowercase__ : np.ndarray | None = None ) -> np.ndarray:
'''simple docstring'''
if event is None:
lowerCAmelCase_ :Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(lowercase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__UpperCAmelCase = transform(29_97_92_45)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__UpperCAmelCase = {ct: c, x: 1, y: 1, z: 1}
__UpperCAmelCase = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
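# Worked numerical example (values rounded): for v = 0.9c, about 2.698e8 m/s,
# beta(v) = 0.9 and gamma(v) = 1 / sqrt(1 - 0.81) is roughly 2.294, so a boost at
# 0.9c stretches the ct coordinate by about a factor of 2.3:
#
#     print(gamma(0.9 * c))  # ~2.294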
| 84 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _snake_case ( ) -> Generator[int, None, None]:
'''simple docstring'''
lowerCAmelCase_ :dict[int, int] = {}
lowerCAmelCase_ :int = 2
while True:
lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ )
if factor:
lowerCAmelCase_ :Optional[int] = factor + prime
while x in factor_map:
x += factor
lowerCAmelCase_ :List[str] = factor
else:
lowerCAmelCase_ :Optional[int] = prime
yield prime
prime += 1
def _snake_case ( lowercase__ : float = 1E10 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = sieve()
lowerCAmelCase_ :str = 1
while True:
lowerCAmelCase_ :int = next(lowercase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowercase__ )
n += 2
if __name__ == "__main__":
print(solution())
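# Quick check of the incremental sieve (the generator is infinite, so take a
# finite prefix):
#
#     primes = sieve()
#     print([next(primes) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]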
| 84 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class lowerCAmelCase__ :
def __init__( self : List[Any] , snake_case__ : int | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = value
UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
def __repr__( self : Dict ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Node | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = root
def __str__( self : Any ):
'''simple docstring'''
return str(self.root )
def __a ( self : int , snake_case__ : Node , snake_case__ : Node | None ):
'''simple docstring'''
if new_children is not None: # reset its kids
UpperCAmelCase__ : List[Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case__ ): # If it is the right children
UpperCAmelCase__ : int = new_children
else:
UpperCAmelCase__ : int = new_children
else:
UpperCAmelCase__ : Tuple = new_children
def __a ( self : Optional[Any] , snake_case__ : Node ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __a ( self : List[Any] ):
'''simple docstring'''
return self.root is None
def __a ( self : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Node(snake_case__ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__ : str = new_node # set its root
else: # Tree is not empty
UpperCAmelCase__ : Optional[int] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__ : Any = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__ : List[Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__ : List[str] = new_node
break
else:
UpperCAmelCase__ : Any = parent_node.right
UpperCAmelCase__ : int = parent_node
def __a ( self : Dict , *snake_case__ : Union[str, Any] ):
'''simple docstring'''
for value in values:
self.__insert(snake_case__ )
def __a ( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
UpperCAmelCase__ : Union[str, Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__ : Union[str, Any] = node.left if value < node.value else node.right
return node
def __a ( self : str , snake_case__ : Node | None = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
UpperCAmelCase__ : List[Any] = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__ : Any = node.right
return node
def __a ( self : Optional[int] , snake_case__ : Node | None = None ):
'''simple docstring'''
if node is None:
UpperCAmelCase__ : Union[str, Any] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__ : List[Any] = self.root
while node.left is not None:
UpperCAmelCase__ : List[Any] = node.left
return node
def __a ( self : Tuple , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.search(snake_case__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case__ , snake_case__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case__ , node.left )
else:
UpperCAmelCase__ : Optional[int] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__ : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __a ( self : Union[str, Any] , snake_case__ : Node | None ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __a ( self : Any , snake_case__ : Tuple=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __a ( self : int , snake_case__ : list , snake_case__ : Node | None ):
'''simple docstring'''
if node:
self.inorder(snake_case__ , node.left )
arr.append(node.value )
self.inorder(snake_case__ , node.right )
def __a ( self : Optional[Any] , snake_case__ : int , snake_case__ : Node ):
'''simple docstring'''
UpperCAmelCase__ : list[int] = []
self.inorder(snake_case__ , snake_case__ ) # append all values to list using inorder traversal
return arr[k - 1]
def SCREAMING_SNAKE_CASE__ ( snake_case : Node | None )-> list[Node]:
'''simple docstring'''
UpperCAmelCase__ : Dict = []
if curr_node is not None:
UpperCAmelCase__ : List[str] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Dict = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase__ : Optional[int] = BinarySearchTree()
for i in testlist:
t.insert(snake_case )
# Prints all the elements of the list in order traversal
print(snake_case )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(snake_case )
print(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
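# Usage sketch for find_kth_smallest (k is 1-indexed over the sorted values):
#
#     t = BinarySearchTree()
#     t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
#     assert t.find_kth_smallest(3, t.root) == 4  # sorted order: 1, 3, 4, 6, ...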
| 351 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 298 | 0 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they are out of
    order with respect to ``direction`` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of ``length`` elements starting at ``low``."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``length`` elements of ``array`` starting at ``low``, in the given direction."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
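# A minimal usage sketch (editor's addition, not part of the original module).
# Bitonic sort is only guaranteed to produce a sorted result when the number
# of elements is a power of two:
#
#   >>> data = [12, 42, -21, 1]
#   >>> bitonic_sort(data, 0, len(data), 1)
#   >>> data
#   [-21, 1, 12, 42]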
| 221 | """simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline: samples frames from a video (local path or URL) with decord and classifies them
    with a video classification model.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 221 | 1 |
'''simple docstring'''
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
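# A minimal counterpart server sketch (editor's addition; the port matches the
# client above, the filename "file_to_send" is a hypothetical placeholder):
#
#   import socket
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(1024))  # b"Hello server!"
#   with open("file_to_send", "rb") as in_file:
#       conn.sendfile(in_file)
#   conn.close()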
| 199 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 199 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 90 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation turn by turn, truncated to the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]

        return input_ids
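# A minimal usage sketch (editor's addition, not part of the original file):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("Hello world")["input_ids"]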
| 90 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 273 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Initialize a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
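    # Editor's note (added): sherman_morrison assumes that ``self`` already holds
    # A^(-1) and evaluates the Sherman-Morrison identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which is why it multiplies ``self`` (not A) with u and v_t above.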
# Testing
if __name__ == "__main__":
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
lowerCamelCase__: List[str] =Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__: Union[str, Any] =1
print(F"""a^(-1) is {ainv}""" )
# u, v
lowerCamelCase__: Optional[int] =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: int =1, 2, -3
lowerCamelCase__: Optional[Any] =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__a , __a )}""" )
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 273 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below ``limit`` that have exactly ten solutions
    to x^2 - y^2 - z^2 = n (Project Euler problem 135)."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisble by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 71 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if ``x`` is a torch, TensorFlow or Jax tensor, or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if ``x`` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if ``x`` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if ``x`` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if ``x`` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if ``x`` is a TensorFlow tensor. Safe to call even if TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if ``x`` is a TensorFlow symbolic (non-eager) tensor. Safe to call without TensorFlow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if ``x`` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, python list or number to a numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Supports indexing by integer or slice (like a tuple) or by
    string (like a dictionary), ignoring ``None`` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not ``None``."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the ``padding`` argument of tokenizers. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the ``return_tensors`` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around ``contextlib.ExitStack`` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with ``delimiter``."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of ``numpy.transpose`` for numpy, torch, TensorFlow and Jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of ``numpy.reshape`` for numpy, torch, TensorFlow and Jax arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of ``numpy.squeeze`` for numpy, torch, TensorFlow and Jax arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of ``numpy.expand_dims`` for numpy, torch, TensorFlow and Jax arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of ``numpy.size`` for numpy, torch, TensorFlow and Jax arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the
    relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
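# A minimal usage sketch for ModelOutput (editor's addition, not part of the
# original file):
#
#   from dataclasses import dataclass
#   from typing import Optional
#
#   @dataclass
#   class ToyOutput(ModelOutput):
#       logits: Optional[list] = None
#       loss: Optional[float] = None
#
#   out = ToyOutput(logits=[1, 2, 3])
#   out["logits"], out.logits, out[0]  # dict-style, attribute and tuple-style access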
| 336 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = ['pixel_values']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
a :str = size if size is not None else {'''shortest_edge''': 224}
a :int = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
a :Optional[int] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a :Union[str, Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='''crop_size''' )
a :Dict = do_resize
a :Optional[int] = size
a :Optional[int] = resample
a :Dict = do_center_crop
a :List[str] = crop_size
a :Any = do_rescale
a :Optional[int] = rescale_factor
a :str = do_normalize
a :Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a :Any = image_std if image_std is not None else OPENAI_CLIP_STD
a :List[Any] = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
a :Union[str, Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
a :List[str] = get_resize_output_image_size(_lowerCamelCase , size=size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
a :Union[str, Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
a :List[Any] = do_resize if do_resize is not None else self.do_resize
a :Any = size if size is not None else self.size
a :Any = get_size_dict(_lowerCamelCase , param_name='''size''' , default_to_square=_lowerCamelCase )
a :str = resample if resample is not None else self.resample
a :int = do_center_crop if do_center_crop is not None else self.do_center_crop
a :Dict = crop_size if crop_size is not None else self.crop_size
a :int = get_size_dict(_lowerCamelCase , param_name='''crop_size''' , default_to_square=_lowerCamelCase )
a :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
a :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
a :Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
a :List[str] = image_mean if image_mean is not None else self.image_mean
a :Dict = image_std if image_std is not None else self.image_std
a :List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a :Optional[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a :int = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
a :Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
a :Optional[int] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
a :List[Any] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
a :str = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
a :Union[str, Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
a :Dict = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
a :List[str] = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
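    # Recap of the preprocessing order implemented above (descriptive comment only):
    # optional RGB conversion -> to numpy arrays -> resize -> center crop ->
    # rescale -> normalize -> channel-dimension formatting -> BatchFeature.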
| 357 |
def solution(n: int = 100):
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
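# Hedged cross-check (my addition, not part of the original solution): the same
# value follows from the closed forms sum(i) = n(n+1)/2 and
# sum(i^2) = n(n+1)(2n+1)/6. The helper name below is hypothetical.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares
assert solution_closed_form() == solution()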
if __name__ == "__main__":
print(F"""{solution() = }""")
| 281 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)
def lowerCAmelCase__ ( self : Union[str, Any] , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
snake_case_ = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : List[Any]=0 , **_lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop("num_inference_steps" , _UpperCAmelCase )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config(**_UpperCAmelCase )
snake_case_ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
snake_case_ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case_ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : Optional[int]=0 , **_lowerCAmelCase : int ) -> int:
"""simple docstring"""
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop("num_inference_steps" , _UpperCAmelCase )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
snake_case_ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case_ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase__ ( self : List[Any] , **_lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**_UpperCAmelCase )
snake_case_ = scheduler_class(**_UpperCAmelCase )
snake_case_ = 1_0
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case_ = model(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case_ = model(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop("num_inference_steps" , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCAmelCase )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , "set_timesteps" ):
snake_case_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case_ = dummy_past_residuals[:]
snake_case_ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case_ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
snake_case_ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(steps_offset=1 )
snake_case_ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def lowerCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=_UpperCAmelCase )
def lowerCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase )
def lowerCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
        # earlier version of set_timesteps() caused an error indexing alphas with inference steps as power of 3
snake_case_ = 2_7
for scheduler_class in self.scheduler_classes:
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case_ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**_UpperCAmelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowerCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ = self.full_loop()
snake_case_ = torch.sum(torch.abs(_UpperCAmelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.2_580 ) < 1e-3
def lowerCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
snake_case_ = self.full_loop(prediction_type="v_prediction" )
snake_case_ = torch.sum(torch.abs(_UpperCAmelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 67.3_986 ) < 1e-2
assert abs(result_mean.item() - 0.0_878 ) < 1e-3
def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
snake_case_ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
snake_case_ = torch.sum(torch.abs(_UpperCAmelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.2_995 ) < 1e-3
def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
snake_case_ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
snake_case_ = torch.sum(torch.abs(_UpperCAmelCase ) )
snake_case_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.2_434 ) < 1e-3
| 159 |
'''simple docstring'''
def __lowerCAmelCase ( txt ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
if txt[a].isalpha()
]
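# Minimal sanity sketch (my example): one variant per alphabetic position,
# with that character upper-cased.
assert __lowerCAmelCase("a1b") == ["A1b", "a1B"]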
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298 | 0 |
from PIL import Image
def change_brightness( img : Image , level : float ):
    """simple docstring"""
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
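# Hedged usage sketch (my addition; negative levels darken, and the output path
# below is illustrative only):
# dark_img = change_brightness(img, -100.0)
# dark_img.save("image_data/lena_darkness.png", format="png")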
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 308 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308 | 1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
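# Return-code convention used by check_circuit_or_path (restated for clarity):
# 1 -> every vertex has even degree, so an Euler circuit exists;
# 2 -> exactly two odd-degree vertices, so only an Euler path exists
#      (it must start at `odd_node`);
# 3 -> neither exists.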
def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 199 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCamelCase = True
from torch.cuda.amp import autocast
lowerCamelCase = logging.getLogger(__name__)
def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
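# (Note, added for clarity: `field(default_factory=...)` is needed because
# mutable defaults such as lists are not allowed directly on dataclass fields.)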
@dataclass
class ModelArguments:
UpperCamelCase__ : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase__ : Optional[str] =field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase__ : Optional[bool] =field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
UpperCamelCase__ : Optional[float] =field(
        default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , )
UpperCamelCase__ : Optional[float] =field(
default=0.05 , metadata={
'help': (
                'Probability of each feature vector along the time axis to be chosen as the start of the vector '
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
UpperCamelCase__ : Optional[float] =field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class DataTrainingArguments:
UpperCamelCase__ : Optional[str] =field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase__ : Optional[str] =field(
default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''
} , )
UpperCamelCase__ : bool =field(
default=UpperCamelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
UpperCamelCase__ : List[str] =list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__( self , features : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch['labels'] = labels
        return batch
class CTCTrainer(Trainer ):
    def training_step( self , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
else:
loss.backward()
return loss.detach()
def main():
'''simple docstring'''
_lowerCamelCase : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any =parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_lowerCamelCase : Dict =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : List[str] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_lowerCamelCase : str =datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
_lowerCamelCase : int =datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
_lowerCamelCase : Dict =F'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(SCREAMING_SNAKE_CASE__ : Tuple ):
_lowerCamelCase : Optional[Any] =re.sub(SCREAMING_SNAKE_CASE__ , '' , batch['sentence'] ).lower() + ' '
return batch
_lowerCamelCase : int =train_dataset.map(SCREAMING_SNAKE_CASE__ , remove_columns=['sentence'] )
_lowerCamelCase : Tuple =eval_dataset.map(SCREAMING_SNAKE_CASE__ , remove_columns=['sentence'] )
def extract_all_chars(SCREAMING_SNAKE_CASE__ : Tuple ):
_lowerCamelCase : int =' '.join(batch['text'] )
_lowerCamelCase : Union[str, Any] =list(set(SCREAMING_SNAKE_CASE__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_lowerCamelCase : Tuple =train_dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , )
_lowerCamelCase : Union[str, Any] =train_dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , )
_lowerCamelCase : Optional[int] =list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
_lowerCamelCase : Tuple ={v: k for k, v in enumerate(SCREAMING_SNAKE_CASE__ )}
_lowerCamelCase : int =vocab_dict[' ']
del vocab_dict[" "]
_lowerCamelCase : Optional[Any] =len(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =len(SCREAMING_SNAKE_CASE__ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_lowerCamelCase : List[str] =min(len(SCREAMING_SNAKE_CASE__ ) , data_args.max_train_samples )
_lowerCamelCase : Any =train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
if data_args.max_val_samples is not None:
_lowerCamelCase : Optional[int] =eval_dataset.select(range(data_args.max_val_samples ) )
_lowerCamelCase : str =torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(SCREAMING_SNAKE_CASE__ : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple =torchaudio.load(batch['path'] )
_lowerCamelCase : Optional[int] =resampler(SCREAMING_SNAKE_CASE__ ).squeeze().numpy()
_lowerCamelCase : Optional[Any] =16_000
_lowerCamelCase : Tuple =batch['text']
return batch
_lowerCamelCase : Any =train_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_lowerCamelCase : Optional[int] =eval_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_lowerCamelCase : int =processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(SCREAMING_SNAKE_CASE__ )
return batch
_lowerCamelCase : str =train_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , )
_lowerCamelCase : Tuple =eval_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_lowerCamelCase : Union[str, Any] =datasets.load_metric('wer' )
def compute_metrics(SCREAMING_SNAKE_CASE__ : str ):
_lowerCamelCase : Union[str, Any] =pred.predictions
_lowerCamelCase : Optional[int] =np.argmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
_lowerCamelCase : Any =processor.tokenizer.pad_token_id
_lowerCamelCase : Any =processor.batch_decode(SCREAMING_SNAKE_CASE__ )
# we do not want to group tokens when computing the metrics
_lowerCamelCase : List[Any] =processor.batch_decode(pred.label_ids , group_tokens=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Any =wer_metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_lowerCamelCase : Optional[int] =DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
# Initialize our Trainer
_lowerCamelCase : Optional[Any] =CTCTrainer(
model=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , compute_metrics=SCREAMING_SNAKE_CASE__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_lowerCamelCase : Optional[Any] =last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_lowerCamelCase : Dict =model_args.model_name_or_path
else:
_lowerCamelCase : int =None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_lowerCamelCase : Optional[int] =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
_lowerCamelCase : Any =train_result.metrics
_lowerCamelCase : Optional[int] =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
_lowerCamelCase : List[Any] =min(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('train' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('train' , SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
_lowerCamelCase : str ={}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCamelCase : Tuple =trainer.evaluate()
_lowerCamelCase : List[Any] =data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Union[str, Any] =min(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ )
return results
if __name__ == "__main__":
main()
| 199 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowercase : Union[str, Any] =logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 365 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 266 | 0 |
def solution() -> int:
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
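    # Hedged cross-check (my addition, not in the source): the standard library
    # counts the same Sundays directly; `weekday()` returns 6 for Sunday.
    import datetime
    assert solution() == sum(
        datetime.date(y, m, 1).weekday() == 6
        for y in range(1901, 2001)
        for m in range(1, 13)
    )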
| 273 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
__A : str = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window = 5_1_2 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0_5_2_2 , hidden_size = 7_6_8 , num_hidden_layers = 1_2 , num_attention_heads = 1_2 , intermediate_size = 3_0_7_2 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_1_2 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ):
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ):
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
    def outputs( self ):
'''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs['''pooler_output'''] = {0: '''batch'''}
return outputs
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
return inputs
| 273 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class RagTokenizer:
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('''config''' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
| 360 |
"""simple docstring"""
import math
import sys
def __lowercase ( number ):
    if number != int(number ):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answer = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
return answers[number]
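# Illustrative check (my example, not from the source): 12 = 4 + 4 + 4, so the
# minimum number of perfect squares summing to 12 is 3.
assert __lowercase(12) == 3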
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
lowerCamelCase__ = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
lowerCamelCase__ = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
return pairs
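# Minimal sanity sketch (my example, not from the source): BPE operates on
# symbol tuples, and this helper enumerates the adjacent symbol pairs.
assert get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}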
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
__lowerCAmelCase : Dict = vocab_file
__lowerCAmelCase : Tuple = merges_file
__lowerCAmelCase : List[Any] = {}
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : int = 2
__lowerCAmelCase : Union[str, Any] = 3
self.add_from_file(_a )
__lowerCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_a , encoding='utf-8' ) as merges_handle:
__lowerCAmelCase : List[str] = merges_handle.read().split('\n' )[:-1]
__lowerCAmelCase : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
__lowerCAmelCase : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
__lowerCAmelCase : Optional[int] = {}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase : Optional[Any] = [self.cls_token_id]
__lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : Optional[Any] = [self.sep_token_id]
__lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if token in self.cache:
return self.cache[token]
__lowerCAmelCase : List[Any] = tuple(_a )
__lowerCAmelCase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__lowerCAmelCase : Any = get_pairs(_a )
if not pairs:
return token
while True:
__lowerCAmelCase : str = min(_a , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase : List[str] = bigram
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : List[str] = 0
while i < len(_a ):
try:
__lowerCAmelCase : Any = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase : Union[str, Any] = tuple(_a )
__lowerCAmelCase : Optional[int] = new_word
if len(_a ) == 1:
break
else:
__lowerCAmelCase : List[Any] = get_pairs(_a )
__lowerCAmelCase : Optional[int] = "@@ ".join(_a )
__lowerCAmelCase : Tuple = word[:-4]
__lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : Dict = re.findall(R'\S+\n?' , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_a , self.unk_token )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = " ".join(_a ).replace('@@ ' , '' ).strip()
return out_string
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase : Optional[int] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Union[str, Any] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if isinstance(_a , _a ):
try:
with open(_a , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
__lowerCAmelCase : List[Any] = f.readlines()
for lineTmp in lines:
__lowerCAmelCase : Optional[Any] = lineTmp.strip()
__lowerCAmelCase : Union[str, Any] = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
__lowerCAmelCase : Optional[int] = line[:idx]
__lowerCAmelCase : Dict = len(self.encoder ) | 86 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor ), :2] = tensor
            else:
                out_tensor[i, : len(tensor )] = tensor
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor ) - 1 :, :2] = tensor
            else:
                out_tensor[i, len(tensor ) - 1 :] = tensor
return out_tensor.tolist()
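# Hedged usage sketch (my example): right-pad two tag sequences to length 4
# with -1, mirroring the `ner_tags` call in the collator below.
assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]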
def is_punctuation( char ) -> bool:
    '''simple docstring'''
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
if cat.startswith("P" ):
return True
return False
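# Illustrative checks (my examples): "!" falls in the ASCII symbol ranges
# tested above, while "A" is a letter and is not punctuation.
assert is_punctuation("!") is True
assert is_punctuation("A") is False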
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 281 | 0 |
'''simple docstring'''
def nor_gate( input_1 : int , input_2 : int ) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main():
    """simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 354 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase :Tuple = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=__magic_name__ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=__magic_name__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=__magic_name__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=__magic_name__ , default='''data/dump''' , help='''The dump file prefix.''' )
lowercase : Tuple = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowercase : Dict = BertTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowercase : Any = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowercase : Optional[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowercase : int = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowercase : int = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowercase : List[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowercase : List[str] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowercase : Dict = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F"""{len(__magic_name__ )} examples to process.""" )
lowercase : Optional[int] = []
lowercase : int = 0
lowercase : List[str] = 1_00_00
lowercase : List[str] = time.time()
for text in data:
lowercase : Optional[Any] = F"""{bos} {text.strip()} {sep}"""
lowercase : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
lowercase : Optional[int] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowercase : Optional[int] = time.time()
logger.info('''Finished binarization''' )
logger.info(F"""{len(__magic_name__ )} examples processed.""" )
lowercase : Dict = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowercase : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowercase : Any = [np.uintaa(__magic_name__ ) for d in rslt]
else:
lowercase : Any = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(__magic_name__ , '''wb''' ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main() | 308 |
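# Example invocation and a read-back of the resulting pickle (a sketch; the paths
# assume the default --file_path/--dump_file arguments above):
#   python binarized_data.py --tokenizer_type bert --tokenizer_name bert-base-uncased
# import pickle
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)
# print(len(sequences), sequences[0][:10])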
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """simple docstring"""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ) -> None:
        """simple docstring"""
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        """simple docstring"""
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self):
        """simple docstring"""
        return self.encoder[self.eod_token]
    @property
    def newline_id(self):
        """simple docstring"""
        return self.encoder["\n"]
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)
    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        return "".join(tokens)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0)) | 308 | 1 |
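# Toy demonstration (sketch): the greedy longest-match-first lookup implemented by
# `WordpieceTokenizer.tokenize` above, using a made-up two-entry vocabulary.
_toy = WordpieceTokenizer(vocab={"今天": 0, "真好": 1}, unk_token="<unk>")
assert _toy.tokenize("今天真好") == ["今天", "真好"]
assert _toy.tokenize("abc") == ["<unk>", "<unk>", "<unk>"]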
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
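# Tiny convergence check (a sketch, not from the original): with a two-character
# target the search space is minuscule, so `basic` should return the exact target.
_gen, _processed, _best = basic("AB", list("AB"), debug=False)
assert _best == "AB"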
| 42 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 1 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : int , *snake_case : Optional[Any] , **snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Optional[int] , *snake_case : Optional[Any] , **snake_case : Union[str, Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : Any , **snake_case : int ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Optional[int] , *snake_case : Any , **snake_case : Any ) -> Dict:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Tuple , *snake_case : List[Any] , **snake_case : Any ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : str , **snake_case : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : int , *snake_case : Optional[int] , **snake_case : int ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Union[str, Any] , **snake_case : Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : int , *snake_case : Optional[Any] , **snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Any , *snake_case : Tuple , **snake_case : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : str , **snake_case : Dict ) -> int:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Optional[Any] , **snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : str , *snake_case : Optional[int] , **snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : List[str] , **snake_case : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : Tuple , **snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : List[str] , **snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Optional[int] , **snake_case : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : int , *snake_case : Union[str, Any] , **snake_case : List[str] ) -> int:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[str] , *snake_case : Dict , **snake_case : Dict ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Optional[int] , *snake_case : Optional[int] , **snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Optional[Any] , **snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : int , *snake_case : Optional[Any] , **snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Union[str, Any] , **snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : Optional[Any] , **snake_case : int ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Tuple , *snake_case : Union[str, Any] , **snake_case : int ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : List[Any] , *snake_case : Optional[int] , **snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Dict , *snake_case : List[str] , **snake_case : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Union[str, Any] , *snake_case : Optional[int] , **snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Tuple , *snake_case : Optional[Any] , **snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Any , *snake_case : Dict , **snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class _lowercase ( metaclass=_lowerCAmelCase ):
lowercase = ["sentencepiece"]
def __init__( self : Dict , *snake_case : Any , **snake_case : Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
| 175 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray, ):
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
@staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0, ):
        '''simple docstring'''
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
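# Usage sketch (not part of the original file): extract log-mel features from one
# second of silence; shapes assume the defaults above and that torchaudio is installed.
_fe = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000)
_out = _fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
print(_out["input_features"].shape)  # (1, num_frames, 80)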
| 266 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
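# The consumer pattern these tests exercise, as a standalone sketch: generation runs in
# a background thread while the main thread drains decoded text from the streamer.
# (`tokenizer`, `model` and `input_ids` are assumed to be built as in the tests above.)
# streamer = TextIteratorStreamer(tokenizer)
# thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer})
# thread.start()
# text = "".join(streamer)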
| 20 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)])
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
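# Sketch (not from the original file): `pos_att_type` passed as a string is normalized
# to a lowercase list by the backwards-compatibility branch in `DebertaV2Config.__init__`.
_cfg = DebertaV2Config(pos_att_type="C2P|P2C")
assert _cfg.pos_att_type == ["c2p", "p2c"]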
| 321 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a = {
'169M': 1_2,
'430M': 2_4,
'1B5': 2_4,
'3B': 3_2,
'7B': 3_2,
'14B': 4_0,
}
a = {
'169M': 7_6_8,
'430M': 1_0_2_4,
'1B5': 2_0_4_8,
'3B': 2_5_6_0,
'7B': 4_0_9_6,
'14B': 5_1_2_0,
}
def convert_state_dict(state_dict):
    '''simple docstring'''
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    '''simple docstring'''
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
a = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
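# Example invocation (sketch; the repo and checkpoint names are illustrative only):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo-with-raw-weights> \
#       --checkpoint_file <file.pth> --output_dir ./rwkv-converted --size 169M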
| 155 | 0 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : Tuple = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str):
    '''simple docstring'''
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCamelCase_ = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        lowerCamelCase_ = [auto_class, f"""TF{auto_class}""", f"""Flax{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase , lowercase , lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase , lowercase ):
continue
# First extract all model_names
lowerCamelCase_ = []
for name in getattr(lowercase , lowercase ).values():
if isinstance(lowercase , lowercase ):
model_names.append(lowercase )
else:
model_names.extend(list(lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
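# For illustration: after this pass, `table` maps each model class to its
# (pipeline tag, auto class) pair, e.g.
#   {"TFBertForMaskedLM": ("fill-mask", "TFAutoModelForMaskedLM")}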
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = get_frameworks_table()
lowerCamelCase_ = Dataset.from_pandas(lowercase )
lowerCamelCase_ = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowercase )
lowerCamelCase_ = Dataset.from_json(lowercase )
lowerCamelCase_ = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowercase ) )
}
lowerCamelCase_ = update_pipeline_and_auto_class_table(lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCamelCase_ = sorted(table.keys() )
lowerCamelCase_ = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
lowerCamelCase_ = Dataset.from_pandas(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowercase , 'pipeline_tags.json' ) )
if commit_sha is not None:
lowerCamelCase_ = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
lowerCamelCase_ = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowercase , repo_type='dataset' , token=lowercase , commit_message=lowercase , )
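# For illustration (hypothetical rows): a successful run leaves the dataset repo
# with two JSON-lines files, e.g.
#   frameworks.json    {"model_type": "bert", "pytorch": true, "tensorflow": true, "flax": true, "processor": "AutoTokenizer"}
#   pipeline_tags.json {"model_class": "BertForMaskedLM", "pipeline_tag": "fill-mask", "auto_class": "AutoModelForMaskedLM"}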
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCamelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCamelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCamelCase_ = pipeline_tasks[key]['pt']
if isinstance(lowercase , (list, tuple) ):
lowerCamelCase_ = model[0]
lowerCamelCase_ = model.__name__
if model not in in_table.values():
missing.append(lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = ', '.join(lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowerCamelCase : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 362 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
lowerCamelCase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Optional[Any] ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
lowerCamelCase_ = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase_ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase_ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(A_ , f"""{split}.{field}""" ) , 'w' ) as f:
f.write(A_ )
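    # For illustration: with the counts above, `data_dir` ends up with six files,
    # e.g. `train.source` holds 12 copies of "What is love ?" and `train.target`
    # holds 12 copies of "life" (2 lines each for the val and test splits).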
def a__ ( self : Optional[Any] , A_ : int , A_ : str = "pytorch" ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = os.path.join(A_ , 'output' )
lowerCamelCase_ = os.path.join(A_ , 'data' )
self._create_dummy_data(data_dir=A_ )
lowerCamelCase_ = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(A_ , env=self.get_env() )
lowerCamelCase_ = os.path.join(A_ , 'metrics.json' )
with open(A_ ) as f:
lowerCamelCase_ = json.load(A_ )
return result
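    # The returned dict mirrors `metrics.json`; the tests below read
    # `result["test"][0]["test_avg_em"]`, the average exact-match score on the
    # test split.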
@require_torch_gpu
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 208 | 0 |
'''simple docstring'''


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
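# Illustrative usage (a minimal sketch; names mirror the helpers above):
#
#   >>> food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#   >>> value = [80, 100, 60, 70]
#   >>> weight = [40, 10, 20, 30]
#   >>> foods = build_menu(food, value, weight)
#   >>> greedy(foods, 60, Things.get_value)  # maximise value under a 60-unit budget
#   ([Things(Pizza, 100, 10), Things(Burger, 80, 40)], 180.0)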
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = None
class __lowercase (_UpperCAmelCase , _UpperCAmelCase ):
_UpperCamelCase = 2
@register_to_config
def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = sigma_max
# setable values
__lowerCAmelCase : int = None
__lowerCAmelCase : np.IntTensor = None
__lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor:
'''simple docstring'''
return sample
def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : str = num_inference_steps
__lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
__lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ )
__lowerCAmelCase : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ )
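        # Geometric interpolation between the configured extremes:
        # entry(i) = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
        # i.e. i = 0 maps to sigma_max**2 and i = N - 1 maps to sigma_min**2.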
def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
__lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
__lowerCAmelCase : List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
__lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device )
__lowerCAmelCase : str = sigma + gamma * sigma
__lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
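        # Churn step: sigma_hat = sigma * (1 + gamma), and just enough fresh noise
        # is mixed in to raise the sample's noise level from sigma to sigma_hat
        # (added variance sigma_hat**2 - sigma**2).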
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
__lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
__lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__lowerCAmelCase : str = sample_prev + sigma_prev * model_output
__lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
__lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
raise NotImplementedError()
| 275 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : List[str] , lowercase__ : int , lowercase__ : Tuple):
'''simple docstring'''
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(lowercase__) for s in shape])}.npy"""
def __snake_case ( self : List[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
def __snake_case ( self : int , lowercase__ : str=0 , lowercase__ : List[str]=(4, 4, 64, 64) , lowercase__ : List[Any]=False):
'''simple docstring'''
lowerCAmelCase__ = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ = jnp.array(load_hf_numpy(self.get_file_format(lowercase__ , lowercase__)) , dtype=lowercase__)
return image
def __snake_case ( self : Any , lowercase__ : Optional[Any]=False , lowercase__ : Dict="CompVis/stable-diffusion-v1-4"):
'''simple docstring'''
lowerCAmelCase__ = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ = 'bf16' if fpaa else None
lowerCAmelCase__ , lowerCAmelCase__ = FlaxUNetaDConditionModel.from_pretrained(
lowercase__ , subfolder='unet' , dtype=lowercase__ , revision=lowercase__)
return model, params
def __snake_case ( self : str , lowercase__ : Optional[int]=0 , lowercase__ : Any=(4, 77, 768) , lowercase__ : Optional[int]=False):
'''simple docstring'''
lowerCAmelCase__ = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase__ = jnp.array(load_hf_numpy(self.get_file_format(lowercase__ , lowercase__)) , dtype=lowercase__)
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1_000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
])
def __snake_case ( self : Tuple , lowercase__ : str , lowercase__ : Any , lowercase__ : Any):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase__)
lowerCAmelCase__ = self.get_latents(lowercase__ , fpaa=lowercase__)
lowerCAmelCase__ = self.get_encoder_hidden_states(lowercase__ , fpaa=lowercase__)
lowerCAmelCase__ = model.apply(
{'params': params} , lowercase__ , jnp.array(lowercase__ , dtype=jnp.intaa) , encoder_hidden_states=lowercase__ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
lowerCAmelCase__ = jnp.array(lowercase__ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowercase__ , lowercase__ , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1_000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
])
def __snake_case ( self : Tuple , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Any):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase__)
lowerCAmelCase__ = self.get_latents(lowercase__ , shape=(4, 4, 96, 96) , fpaa=lowercase__)
lowerCAmelCase__ = self.get_encoder_hidden_states(lowercase__ , shape=(4, 77, 1_024) , fpaa=lowercase__)
lowerCAmelCase__ = model.apply(
{'params': params} , lowercase__ , jnp.array(lowercase__ , dtype=jnp.intaa) , encoder_hidden_states=lowercase__ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
lowerCAmelCase__ = jnp.array(lowercase__ , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase__ , lowercase__ , atol=1e-2)
| 119 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = ['pixel_values']
def __init__( self : Tuple , lowercase__ : bool = True , lowercase__ : Dict[str, int] = None , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : bool = True , lowercase__ : Union[int, float] = 1 / 255 , lowercase__ : bool = True , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : bool = True , **lowercase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**lowercase__)
lowerCAmelCase__ = size if size is not None else {'height': 384, 'width': 384}
lowerCAmelCase__ = get_size_dict(lowercase__ , default_to_square=lowercase__)
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ = do_convert_rgb
def __snake_case ( self : List[str] , lowercase__ : np.ndarray , lowercase__ : Dict[str, int] , lowercase__ : PILImageResampling = PILImageResampling.BICUBIC , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = get_size_dict(lowercase__ , default_to_square=lowercase__)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
lowerCAmelCase__ = (size['height'], size['width'])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__)
def __snake_case ( self : List[str] , lowercase__ : np.ndarray , lowercase__ : Union[int, float] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Union[str, Any] , ):
'''simple docstring'''
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__)
def __snake_case ( self : Optional[Any] , lowercase__ : np.ndarray , lowercase__ : Union[float, List[float]] , lowercase__ : Union[float, List[float]] , lowercase__ : Optional[Union[str, ChannelDimension]] = None , **lowercase__ : Any , ):
'''simple docstring'''
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__)
def __snake_case ( self : Any , lowercase__ : ImageInput , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Dict[str, int]] = None , lowercase__ : PILImageResampling = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[float] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[float, List[float]]] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : bool = None , lowercase__ : ChannelDimension = ChannelDimension.FIRST , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(lowercase__ , default_to_square=lowercase__)
lowerCAmelCase__ = make_list_of_images(lowercase__)
if not valid_images(lowercase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ = [convert_to_rgb(lowercase__) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(lowercase__) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=lowercase__ , scale=lowercase__) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(lowercase__ , lowercase__) for image in images]
lowerCAmelCase__ = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase__)
return encoded_outputs
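# Illustrative usage (a sketch; the obfuscated class name `a_` above stands in
# for a BLIP-style image processor, and `image` is assumed to be a PIL image):
#
#   image_processor = a_()                                    # 384x384 defaults
#   batch = image_processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape                               # -> (1, 3, 384, 384)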
| 119 | 1 |
'''simple docstring'''
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
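# Illustrative behaviour (a minimal sketch of the expected output):
#
#   >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   [10, 22, 33, 41, 60, 80]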
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : List[str] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 1 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning cluster index for a sample by squared distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 224 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = r"\w+[.]\d+"
lowercase__ = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase__ = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
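# For illustration: rename_key("down_blocks.0.attentions.1.norm") rewrites every
# `<word>.<digit>` hit in place, giving "down_blocks_0.attentions_1.norm".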
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
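# For illustration: a PyTorch conv kernel stored as (out_ch, in_ch, kh, kw) is
# transposed above to Flax's (kh, kw, in_ch, out_ch) layout, while a linear
# weight is simply transposed.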
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase__ = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ = rename_key(SCREAMING_SNAKE_CASE_ )
lowercase__ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase__ , lowercase__ = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase__ = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
| 224 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __snake_case :
def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=99 ,snake_case=32 ,snake_case=2 ,snake_case=4 ,snake_case=37 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=16 ,snake_case=2 ,snake_case=0.02 ,snake_case=3 ,snake_case=4 ,snake_case=None ,):
'''simple docstring'''
lowercase : Optional[Any] = parent
lowercase : List[Any] = 13
lowercase : Any = 7
lowercase : Union[str, Any] = True
lowercase : Dict = True
lowercase : List[Any] = True
lowercase : List[str] = True
lowercase : Any = 99
lowercase : Tuple = 32
lowercase : Optional[Any] = 2
lowercase : Tuple = 4
lowercase : Tuple = 37
lowercase : Optional[int] = """gelu"""
lowercase : List[str] = 0.1
lowercase : Tuple = 0.1
lowercase : Union[str, Any] = 512
lowercase : List[str] = 16
lowercase : Optional[int] = 2
lowercase : str = 0.02
lowercase : int = 3
lowercase : Optional[Any] = 4
lowercase : str = None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase : Tuple = None
if self.use_input_mask:
lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any = None
if self.use_token_type_ids:
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase : List[str] = None
lowercase : Optional[int] = None
lowercase : Any = None
if self.use_labels:
lowercase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase : str = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : Any = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=snake_case ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = TFRoFormerModel(config=snake_case )
lowercase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase : Tuple = [input_ids, input_mask]
lowercase : Any = model(snake_case )
lowercase : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : str = True
lowercase : Dict = TFRoFormerForCausalLM(config=snake_case )
lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase : Optional[int] = model(snake_case )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) ,[self.batch_size, self.seq_length, self.vocab_size] )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Dict = TFRoFormerForMaskedLM(config=snake_case )
lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase : Dict = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = self.num_labels
lowercase : Any = TFRoFormerForSequenceClassification(config=snake_case )
lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = self.num_choices
lowercase : Tuple = TFRoFormerForMultipleChoice(config=snake_case )
lowercase : Any = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Union[str, Any] = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Optional[Any] = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Optional[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase : Dict = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = self.num_labels
lowercase : List[str] = TFRoFormerForTokenClassification(config=snake_case )
lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = TFRoFormerForQuestionAnswering(config=snake_case )
lowercase : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase : Dict = model(snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowercase : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Tuple= (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_a : Dict= (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : List[Any]= False
_a : Optional[Any]= False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = TFRoFormerModelTester(self )
lowercase : Optional[Any] = ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(snake_case )
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
lowercase : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : Tuple = model(snake_case )[0]
# TODO Replace vocab size
lowercase : int = 50000
lowercase : Optional[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape ,snake_case )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Any = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,snake_case ,atol=1e-4 )
@require_tf
class __snake_case ( unittest.TestCase ):
_a : Dict= 1E-4
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = tf.constant([[4, 10]] )
lowercase : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 ,embedding_dim=6 )
lowercase : Optional[Any] = emba(input_ids.shape )
lowercase : Optional[int] = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(snake_case ,snake_case ,atol=self.tolerance )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
lowercase : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 ,embedding_dim=512 )
emba([2, 16, 512] )
lowercase : Union[str, Any] = emba.weight[:3, :5]
tf.debugging.assert_near(snake_case ,snake_case ,atol=self.tolerance )
@require_tf
class __snake_case ( unittest.TestCase ):
_a : int= 1E-4
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 ,dtype=tf.floataa ) ,shape=(2, 12, 16, 64) ) / 100
lowercase : Tuple = -tf.reshape(tf.range(2 * 12 * 16 * 64 ,dtype=tf.floataa ) ,shape=(2, 12, 16, 64) ) / 100
lowercase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 ,embedding_dim=64 )
lowercase : Union[str, Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
snake_case ,snake_case ,snake_case )
lowercase : Optional[Any] = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
lowercase : Optional[Any] = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] ,snake_case ,atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] ,snake_case ,atol=self.tolerance )
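# A minimal plain-NumPy sketch (illustrative only; the layout is an assumption
# consistent with the expected values in the tests above) of the sinusoidal
# position table: the first half of each row holds sines, the second half
# cosines, reproducing rows like [0, 0, 0, 1, 1, 1] for position 0 and
# [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000] for position 1.
import numpy as np

def _sinusoidal_positions_demo(num_positions: int, dim: int) -> np.ndarray:
    freqs = 1.0 / (10_000 ** (np.arange(0, dim, 2) / dim))        # (dim/2,)
    angles = np.arange(num_positions)[:, None] * freqs[None, :]   # (P, dim/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)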
| 20 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Tuple:
lowercase : Union[str, Any] = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
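# For illustration, idx=0 produces rename pairs such as
# ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#  "stage0.patch_embed.proj.weight").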
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
lowercase : Optional[Any] = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Optional[Any] = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def _snake_case( ) -> Dict:
lowercase : Optional[Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
lowercase : Any = """imagenet-1k-id2label.json"""
lowercase : List[str] = 1_000
lowercase : int = """huggingface/label-files"""
lowercase : Union[str, Any] = num_labels
lowercase : Optional[Any] = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) ) , """r""" ) )
lowercase : List[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : Dict = idalabel
lowercase : List[str] = {v: k for k, v in idalabel.items()}
lowercase : List[str] = CvtConfig(num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
lowercase : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
lowercase : Dict = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase : int = [2, 2, 20]
lowercase : Optional[int] = [3, 12, 16]
lowercase : str = [192, 768, 1_024]
lowercase : Union[str, Any] = CvtForImageClassification(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
lowercase : Optional[Any] = image_size
lowercase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device("""cpu""" ) )
lowercase : Optional[Any] = OrderedDict()
lowercase : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase : Optional[Any] = list_of_state_dict + cls_token(SCREAMING_SNAKE_CASE__ )
lowercase : str = list_of_state_dict + embeddings(SCREAMING_SNAKE_CASE__ )
for cnt in range(config.depth[idx] ):
lowercase : List[str] = list_of_state_dict + attention(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
lowercase : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 20 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = """speech_to_text_2"""
a_ : Tuple = ["""past_key_values"""]
a_ : List[str] = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __UpperCAmelCase=1_00_00 , __UpperCAmelCase=6 , __UpperCAmelCase=20_48 , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=2_56 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=10_24 , **__UpperCAmelCase , ) ->Any:
a_ = vocab_size
a_ = d_model
a_ = decoder_ffn_dim
a_ = decoder_layers
a_ = decoder_attention_heads
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = activation_function
a_ = init_std
a_ = decoder_layerdrop
a_ = use_cache
a_ = decoder_layers
a_ = scale_embedding # scale factor will be sqrt(d_model) if True
a_ = max_target_positions
super().__init__(
            pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
| 351 |
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 303 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowerCamelCase :Tuple = True
except ImportError:
lowerCamelCase :List[Any] = False
lowerCamelCase :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args):
    '''simple docstring'''
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
@staticmethod
def _a (lowercase ):
A_ : List[str] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=_a , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=_a , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=_a )
def __init__(self , lowercase , lowercase , lowercase=None , *lowercase ):
A_ : str = testing
A_ : List[Any] = testing_file
A_ : Tuple = path
def _a (self ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A_ : Any = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(_a ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
A_ : List[Any] = (
Path(_a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A_ : List[str] = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_a ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
A_ : List[Any] = json.load(_a )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_a , extra_context=_a , )
A_ : List[str] = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
A_ : Optional[int] = json.load(_a )
A_ : Tuple = configuration['lowercase_modelname']
A_ : List[Any] = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A_ : Optional[Any] = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A_ : List[Any] = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A_ : Optional[Any] = 'Flax' in generate_tensorflow_pytorch_and_flax
A_ : Tuple = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(_a , exist_ok=_a )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=_a )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(lowercase ):
with open(_a , """r""" ) as f:
A_ : List[str] = f.readlines()
with open(_a , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_a )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase , lowercase , lowercase ):
# Create temp file
A_ : Any = mkstemp()
A_ : int = False
with fdopen(_a , """w""" ) as new_file:
with open(_a ) as old_file:
for line in old_file:
new_file.write(_a )
if line_to_copy_below in line:
A_ : int = True
for line_to_copy in lines_to_copy:
new_file.write(_a )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(_a , _a )
# Remove original file
remove(_a )
# Move new file
move(_a , _a )
def skip_units(lowercase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase ):
with open(_a ) as datafile:
A_ : int = []
A_ : Optional[Any] = False
A_ : Optional[int] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A_ : Any = line.split("""\"""" )[1]
A_ : List[str] = skip_units(_a )
elif "# Below: " in line and "##" not in line:
A_ : str = line.split("""\"""" )[1]
A_ : Any = skip_units(_a )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_a , _a , _a )
A_ : str = []
elif "# Replace with" in line and "##" not in line:
A_ : int = []
elif "##" not in line:
lines_to_copy.append(_a )
remove(_a )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(_a ) | 206 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
__lowerCamelCase : Any = checkpoint
__lowerCamelCase : List[str] = {}
__lowerCamelCase : List[str] = vae_state_dict['encoder.conv_in.weight']
__lowerCamelCase : Dict = vae_state_dict['encoder.conv_in.bias']
__lowerCamelCase : Optional[Any] = vae_state_dict['encoder.conv_out.weight']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.conv_out.bias']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.weight']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.bias']
__lowerCamelCase : Dict = vae_state_dict['decoder.conv_in.weight']
__lowerCamelCase : List[str] = vae_state_dict['decoder.conv_in.bias']
__lowerCamelCase : Tuple = vae_state_dict['decoder.conv_out.weight']
__lowerCamelCase : List[str] = vae_state_dict['decoder.conv_out.bias']
__lowerCamelCase : Dict = vae_state_dict['decoder.norm_out.weight']
__lowerCamelCase : Union[str, Any] = vae_state_dict['decoder.norm_out.bias']
__lowerCamelCase : Any = vae_state_dict['quant_conv.weight']
__lowerCamelCase : Union[str, Any] = vae_state_dict['quant_conv.bias']
__lowerCamelCase : Tuple = vae_state_dict['post_quant_conv.weight']
__lowerCamelCase : List[Any] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
__lowerCamelCase : str = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
__lowerCamelCase : int = {
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
__lowerCamelCase : Dict = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
__lowerCamelCase : Dict = {
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
}
for i in range(_lowerCAmelCase ):
__lowerCamelCase : List[str] = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
__lowerCamelCase : Any = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
__lowerCamelCase : Any = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
__lowerCamelCase : Optional[int] = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : List[Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
__lowerCamelCase : str = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
__lowerCamelCase : int = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
__lowerCamelCase : Optional[Any] = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Dict = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
__lowerCamelCase : int = renew_vae_attention_paths(_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
__lowerCamelCase : Optional[Any] = num_up_blocks - 1 - i
__lowerCamelCase : Any = [
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
__lowerCamelCase : Optional[int] = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
__lowerCamelCase : Optional[Any] = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
__lowerCamelCase : Tuple = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
__lowerCamelCase : Union[str, Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
__lowerCamelCase : List[str] = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
__lowerCamelCase : str = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Any = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
__lowerCamelCase : List[str] = renew_vae_attention_paths(_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
# Only support V1
__lowerCamelCase : Any = requests.get(
    'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
__lowerCamelCase : str = io.BytesIO(r.content )
__lowerCamelCase : Any = OmegaConf.load(_lowerCAmelCase )
__lowerCamelCase : List[Any] = 512
__lowerCamelCase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
__lowerCamelCase : Optional[Any] = {}
with safe_open(_lowerCAmelCase ,framework='pt' ,device='cpu' ) as f:
for key in f.keys():
__lowerCamelCase : int = f.get_tensor(_lowerCAmelCase )
else:
__lowerCamelCase : int = torch.load(_lowerCAmelCase ,map_location=_lowerCAmelCase )['state_dict']
# Convert the VAE model.
__lowerCamelCase : int = create_vae_diffusers_config(_lowerCAmelCase ,image_size=_lowerCAmelCase )
__lowerCamelCase : int = custom_convert_ldm_vae_checkpoint(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : Any = AutoencoderKL(**_lowerCAmelCase )
vae.load_state_dict(_lowerCAmelCase )
vae.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted diffusers VAE.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
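# Example invocation; both flags are required by the parser above and the paths
# are placeholders:
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers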
| 208 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
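# These re-exports exist only for backward compatibility, so legacy imports keep
# resolving; for example:
#
#   from transformers.file_utils import WEIGHTS_NAME, ModelOutput, is_torch_available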
| 14 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer; the condition is always true and the loop exits via `break`
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
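# Example invocation; the script name and paths are placeholders, and `--alpha`
# keeps its default merging ratio of 0.75 unless overridden:
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora.safetensors \
#       --dump_path ./merged-pipeline \
#       --device cpu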
| 14 | 1 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
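# A couple of worked examples for the function above (values chosen arbitrarily):
#
#   binary_and(25, 32)  # -> '0b000000'  (011001 & 100000, zero-padded to 6 bits)
#   binary_and(37, 50)  # -> '0b100000'  (100101 & 110010)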
| 119 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__UpperCAmelCase = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
__UpperCAmelCase = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location='cpu')
    hub_interface = torch.hub.load('pytorch/fairseq', 'bart.large.cnn').eval()
    hub_interface.model.load_state_dict(sd['model'])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
if not os.path.exists(snake_case__ ):
UpperCamelCase : List[str] = torch.hub.load('pytorch/fairseq' , snake_case__ ).eval()
else:
UpperCamelCase : int = load_xsum_checkpoint(snake_case__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
UpperCamelCase : Tuple = checkpoint_path.replace('.' , '-' )
UpperCamelCase : Optional[int] = BartConfig.from_pretrained(snake_case__ )
UpperCamelCase : Optional[Any] = bart.encode(snake_case__ ).unsqueeze(0 )
UpperCamelCase : Any = BartTokenizer.from_pretrained(snake_case__ ).encode(snake_case__ , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(snake_case__ , snake_case__ ).all():
raise ValueError(
F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
if checkpoint_path == "bart.large.mnli":
UpperCamelCase : Union[str, Any] = bart.state_dict()
remove_ignore_keys_(snake_case__ )
UpperCamelCase : int = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase : Any = BartForSequenceClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
UpperCamelCase : Any = bart.predict('mnli' , snake_case__ , return_logits=snake_case__ )
UpperCamelCase : Tuple = model(snake_case__ )[0] # logits
else: # no classification heads to worry about
UpperCamelCase : List[str] = bart.model.state_dict()
remove_ignore_keys_(snake_case__ )
UpperCamelCase : List[str] = state_dict['decoder.embed_tokens.weight']
UpperCamelCase : Union[str, Any] = bart.extract_features(snake_case__ )
if hf_checkpoint_name == "facebook/bart-large":
UpperCamelCase : List[str] = BartModel(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
UpperCamelCase : Optional[int] = model(snake_case__ ).model[0]
else:
UpperCamelCase : Union[str, Any] = BartForConditionalGeneration(snake_case__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(snake_case__ )
if hasattr(snake_case__ , 'lm_head' ):
UpperCamelCase : Optional[int] = make_linear_from_emb(model.model.shared )
UpperCamelCase : Dict = model.model(snake_case__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
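# Example invocation; the script name is illustrative and `fairseq_path` may be a
# torch.hub alias such as `bart.large` or a local model.pt:
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large ./bart-large-converted --hf_config facebook/bart-large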
| 119 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Optional[int]:
return unittest.skip("Test was skipped" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> str:
return unittest.skipUnless(_run_slow_tests , "test is slow" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> List[Any]:
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> Optional[int]:
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> List[Any]:
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> Any:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] ) -> Dict:
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str ) -> int:
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> int:
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> Union[str, Any]:
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Any:
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> List[str]:
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any]=None , lowerCAmelCase: int=None ) -> Dict:
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version(">=" , lowerCamelCase__ ) , F'test requires torch version >= {version}' )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> Optional[Any]:
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Optional[int]:
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> int:
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(lowerCamelCase__ )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> Optional[Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(lowerCamelCase__ )
class a ( unittest.TestCase ):
_lowercase = True
@classmethod
def _UpperCAmelCase ( cls ):
'''simple docstring'''
_UpperCAmelCase : Any = tempfile.mkdtemp()
@classmethod
def _UpperCAmelCase ( cls ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A_ )
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = mocks if isinstance(A_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Optional[int]:
_UpperCAmelCase : Tuple = AcceleratorState()
_UpperCAmelCase : List[str] = tensor[None].clone().to(state.device )
_UpperCAmelCase : Dict = gather(lowerCamelCase__ ).cpu()
_UpperCAmelCase : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class a :
def __init__( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = returncode
_UpperCAmelCase : Optional[Any] = stdout
_UpperCAmelCase : Union[str, Any] = stderr
async def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: str ) -> int:
while True:
_UpperCAmelCase : Dict = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] , lowerCAmelCase: Dict=None , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: List[str]=None , lowerCAmelCase: Dict=False , lowerCAmelCase: Union[str, Any]=False ) -> Tuple:
if echo:
print("\nRunning: " , " ".join(lowerCamelCase__ ) )
_UpperCAmelCase : Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Optional[Any] = []
def tee(lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] , lowerCAmelCase: Tuple , lowerCAmelCase: Optional[Any]="" ):
_UpperCAmelCase : List[str] = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label="stderr:" ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Dict=None , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: str=180 , lowerCAmelCase: Tuple=False , lowerCAmelCase: Optional[int]=True ) -> str:
_UpperCAmelCase : List[Any] = asyncio.get_event_loop()
_UpperCAmelCase : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
_UpperCAmelCase : Union[str, Any] = " ".join(lowerCamelCase__ )
if result.returncode > 0:
_UpperCAmelCase : Any = "\n".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class a ( snake_case_ ):
pass
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Dict=False ) -> List[Any]:
try:
_UpperCAmelCase : Optional[Any] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , "decode" ):
_UpperCAmelCase : Dict = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'Command `{" ".join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
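# The helpers above correspond, in upstream `accelerate.test_utils`, to names such
# as `slow`, `require_cuda`, `TempDirTestCase` and `run_command`. A hedged sketch of
# how they are typically combined in a test module (the test body is illustrative):
#
#   @require_cuda
#   class LaunchTest(TempDirTestCase):
#       def test_subprocess(self):
#           output = run_command(["python", "-c", "print('ok')"], return_stdout=True)
#           assert "ok" in output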
| 353 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = True , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 64 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 32 , A_ = 32 , A_ = "gelu" , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_ = True , A_=True , A_ = 10 , A_ = 25 , A_ = 3 , **A_ , ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = prediction_length
_UpperCAmelCase : Dict = context_length if context_length is not None else prediction_length
_UpperCAmelCase : Tuple = distribution_output
_UpperCAmelCase : List[Any] = loss
_UpperCAmelCase : Optional[Any] = input_size
_UpperCAmelCase : int = num_time_features
_UpperCAmelCase : str = lags_sequence
_UpperCAmelCase : Union[str, Any] = scaling
_UpperCAmelCase : Union[str, Any] = num_dynamic_real_features
_UpperCAmelCase : int = num_static_real_features
_UpperCAmelCase : int = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : Union[str, Any] = cardinality
else:
_UpperCAmelCase : Any = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : int = embedding_dimension
else:
_UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
_UpperCAmelCase : int = d_model
_UpperCAmelCase : Any = encoder_attention_heads
_UpperCAmelCase : str = decoder_attention_heads
_UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
_UpperCAmelCase : Any = decoder_ffn_dim
_UpperCAmelCase : Any = encoder_layers
_UpperCAmelCase : Optional[int] = decoder_layers
_UpperCAmelCase : Optional[Any] = dropout
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : Dict = activation_dropout
_UpperCAmelCase : Dict = encoder_layerdrop
_UpperCAmelCase : int = decoder_layerdrop
_UpperCAmelCase : Union[str, Any] = activation_function
_UpperCAmelCase : str = init_std
_UpperCAmelCase : Tuple = use_cache
# Autoformer
_UpperCAmelCase : str = label_length
_UpperCAmelCase : Any = moving_average
_UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
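# A minimal, illustrative construction of the config above; every value besides
# the ones passed explicitly falls back to the defaults defined in `__init__`:
#
#   config = AutoformerConfig(prediction_length=24, context_length=48, input_size=1)
#   assert config.d_model == 64  # transformer width defaults to 64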
| 189 | 0 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
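# For context, `is_small_dataset` is expected to reduce to roughly the following
# comparison (a simplified sketch, not the exact datasets implementation):
#
#   def is_small_dataset(dataset_size):
#       return bool(dataset_size and dataset_size < datasets.config.IN_MEMORY_MAX_SIZE)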
| 224 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class SchedulerType(Enum):
    """simple docstring"""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
"""simple docstring"""
return LambdaLR(lowerCAmelCase__ , lambda lowerCAmelCase__ : 1 , last_epoch=lowerCAmelCase__ )
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""simple docstring"""
def lr_lambda(lowerCAmelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1.0 , lowerCAmelCase__ ) )
return 1.0
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
"""simple docstring"""
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Optional[Any] = step_rules.split(',' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = rule_str.split(':' )
lowerCAmelCase_ : List[str] = int(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = float(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = value
lowerCAmelCase_ : str = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ):
def rule_func(lowerCAmelCase__ : int ) -> float:
lowerCAmelCase_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Dict = create_rules_function(lowerCAmelCase__ , lowerCAmelCase__ )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
"""simple docstring"""
def lr_lambda(lowerCAmelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
"""simple docstring"""
def lr_lambda(lowerCAmelCase__ : Any ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
"""simple docstring"""
def lr_lambda(lowerCAmelCase__ : str ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def get_polynomial_decay_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1):
"""simple docstring"""
lowerCAmelCase_ : Any = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(lowerCAmelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : Any = lr_init - lr_end
lowerCAmelCase_ : int = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Dict = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : Dict = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """simple docstring"""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
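# Illustrative use of `get_scheduler` above; the model and step counts are placeholders:
#
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=5e-5)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for _ in range(1000):
#       optimizer.step()
#       lr_scheduler.step()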
| 224 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCamelCase = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
__A : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__A : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
__A : List[int] =list_field(
default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Use FP16 to accelerate inference."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark training of model"})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Verbose memory tracing"})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Trace memory line by line"})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save result to a CSV file"})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save all print statements in a log file"})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to print environment information"})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__A : str =field(
default=F'''inference_time_{round(time())}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__A : str =field(
default=F'''inference_memory_{round(time())}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__A : str =field(
default=F'''train_time_{round(time())}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__A : str =field(
default=F'''train_memory_{round(time())}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__A : str =field(
default=F'''env_info_{round(time())}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__A : str =field(
default=F'''log_{round(time())}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__A : int =field(default=3 , metadata={"help": "Times an experiment will be run."})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def UpperCamelCase__ ( self ):
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." ,_snake_case ,)
def UpperCamelCase__ ( self ):
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def UpperCamelCase__ ( self ):
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def UpperCamelCase__ ( self ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
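# Sketch of how these (deprecated) benchmark arguments were typically consumed;
# the class names refer to the upstream transformers API and values are illustrative:
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = PyTorchBenchmark(args).run()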
| 354 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-10_00, 10_00) for i in range(10)]
    r = randint(-50_00, 50_00)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search over all 3-permutations."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer O(n^2) search on the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=1_00_00)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=1_00_00)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 67 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] ,lowercase__ : Dict=None ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Tuple ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase__ ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Any]=None ,**lowercase__ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[str] ,**lowercase__ : int ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase__ ,)
return self.image_processor
| 104 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
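    # Net effect: exactly one of the MLM / CLM objectives is active, every loss
    # weight is non-negative, and at least one of them is strictly positive.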
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
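        # Rare tokens are up-weighted for masking: probability is proportional to
        # count ** -mlm_smoothing, following the XLM recipe mentioned in the help text.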
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303 | 0 |
"""simple docstring"""
A_ : Dict = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
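# Capacity matrix of a 6-node flow network: entry [u][v] is the capacity of the
# directed edge u -> v (0 means no edge). Node 0 is the source, node 5 the sink.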
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = [False] * len(_lowerCamelCase )
lowerCamelCase__ : Any = [s]
lowerCamelCase__ : Dict = True
while queue:
lowerCamelCase__ : List[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[str] = u
return visited[t]
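# Edmonds-Karp style max-flow: repeatedly find a BFS augmenting path, push its
# bottleneck capacity, and finally report the edges that became saturated
# relative to the original capacities (a minimum cut).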
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = [-1] * (len(_lowerCamelCase ))
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = float('Inf' )
lowerCamelCase__ : List[str] = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ : int = min(_lowerCamelCase , graph[parent[s]][s] )
lowerCamelCase__ : Tuple = parent[s]
max_flow += path_flow
lowerCamelCase__ : Optional[int] = sink
while v != source:
lowerCamelCase__ : str = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ : Tuple = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
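# Usage sketch (assumption, standard _LazyModule behavior): importing this package
# stays cheap because the model classes above are only imported on first access,
# e.g. looking up `EncoderDecoderModel` triggers the torch-gated import at that point.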
| 316 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 14 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = encoder_stride
A__ = num_attention_outputs
A__ = embed_dim
A__ = embed_dim + 1
A__ = resolution
A__ = depths
A__ = hidden_sizes
A__ = dim
A__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
A__ = TFEfficientFormerModel(config=UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__)
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = TFEfficientFormerModelTester(self)
A__ = ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
if hasattr(self.model_tester , '''encoder_seq_length'''):
A__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
A__ = seq_length * self.model_tester.chunk_length
else:
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCAmelCase__ , (list, tuple))
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int:
'''simple docstring'''
A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__)
A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
A__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCAmelCase__)
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__)
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ = model_class(UpperCAmelCase__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ = model(UpperCAmelCase__)
self.assertTrue(outputs_dict is not None)
def SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__)
# verify the logits
A__ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
| 14 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=a , )
assert hasattr(self , '''env''' )
def SCREAMING_SNAKE_CASE__ ( self : str , a : int ):
"""simple docstring"""
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
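        # Assumed hardware mapping: ml.p3dn.24xlarge exposes 8 GPUs, matching the 8
        # processes per host; the model is split into 4 pipeline partitions, with
        # DDP replicating across the remaining workers.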
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=a , py_version='''py36''' , )
def SCREAMING_SNAKE_CASE__ ( self : int , a : Dict ):
"""simple docstring"""
TrainingJobAnalytics(a ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : int ):
"""simple docstring"""
__lowerCamelCase = self.create_estimator(a )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get the train time from the SageMaker job; this includes starting, preprocessing, and stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump the test results into a json file to share in the PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a )
| 357 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : List[Any] ="unispeech-sat"
def __init__( self : Dict , a : str=32 , a : Any=7_68 , a : Optional[Any]=12 , a : Optional[int]=12 , a : int=30_72 , a : int="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : List[Any]=0.1 , a : Tuple=0.0 , a : Optional[Any]=0.0 , a : Tuple=0.1 , a : List[Any]=0.1 , a : str=0.02 , a : List[Any]=1e-5 , a : int="group" , a : Union[str, Any]="gelu" , a : Optional[int]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , a : List[Any]=(5, 2, 2, 2, 2, 2, 2) , a : int=(10, 3, 3, 3, 3, 2, 2) , a : Optional[Any]=False , a : Any=1_28 , a : Tuple=16 , a : str=False , a : Optional[Any]=True , a : Dict=0.05 , a : List[Any]=10 , a : Any=2 , a : Optional[Any]=0.0 , a : Optional[Any]=10 , a : Any=0 , a : Any=3_20 , a : str=2 , a : List[str]=0.1 , a : List[str]=1_00 , a : List[str]=2_56 , a : str=2_56 , a : Dict=0.1 , a : Optional[Any]="mean" , a : str=False , a : Tuple=False , a : Optional[Any]=2_56 , a : int=(5_12, 5_12, 5_12, 5_12, 15_00) , a : int=(5, 3, 3, 1, 1) , a : Any=(1, 2, 3, 1, 1) , a : Union[str, Any]=5_12 , a : Optional[int]=0 , a : Optional[int]=1 , a : Optional[int]=2 , a : int=5_04 , **a : Dict , ):
"""simple docstring"""
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
__lowerCamelCase = num_clusters
__lowerCamelCase = do_stable_layer_norm
__lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCamelCase = num_codevectors_per_group
__lowerCamelCase = num_codevector_groups
__lowerCamelCase = contrastive_logits_temperature
__lowerCamelCase = feat_quantizer_dropout
__lowerCamelCase = num_negatives
__lowerCamelCase = codevector_dim
__lowerCamelCase = proj_codevector_dim
__lowerCamelCase = diversity_loss_weight
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 237 | 0 |
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if number > 0:
        raise ValueError('''input must be a non-positive integer''' )
__UpperCamelCase :str = len(bin(SCREAMING_SNAKE_CASE )[3:] )
__UpperCamelCase :Optional[Any] = bin(abs(SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:]
__UpperCamelCase :Optional[Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __a ( A__ ):
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
UpperCamelCase__ : Any = 8
# DPR tok
UpperCamelCase__ : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
UpperCamelCase__ : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ : int = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ : Union[str, Any] = {"unk_token": "<unk>"}
UpperCamelCase__ : Dict = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ : str = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def __lowercase ( self : int ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Tuple ):
'''simple docstring'''
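        # Each dummy passage gets a constant embedding (all ones vs. all twos) so
        # inner-product retrieval is deterministic: a query of ones ranks doc "1"
        # first, a query of -ones ranks doc "0" first, as the tests below assert.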
UpperCamelCase__ : Tuple = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : int = self.get_dummy_dataset()
UpperCamelCase__ : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase__ : str = dataset
UpperCamelCase__ : Optional[int] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : bool ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_dummy_dataset()
UpperCamelCase__ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , "dataset" )
UpperCamelCase__ : List[str] = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
UpperCamelCase__ : str = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCamelCase__ : List[Any] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE ) , )
return retriever
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : int = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase__ : List[str] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
UpperCamelCase__ : Tuple = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE , open(SCREAMING_SNAKE_CASE , "wb" ) )
UpperCamelCase__ : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
UpperCamelCase__ : List[str] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Tuple = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase__ : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ : Any = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : int = 1
UpperCamelCase__ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ : Any = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : int = 1
UpperCamelCase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ : Union[str, Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : List[str] = 1
UpperCamelCase__ : Any = self.get_dummy_legacy_index_retriever()
UpperCamelCase__ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase__ : int = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)  # assuming the in-memory variant
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary. | 189 | 0 |
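For reference, the retrieve() contract that the tests above exercise can be reduced to a short sketch. This is illustrative only: `retriever` and `retrieval_vector_size` are assumed to come from fixtures like the dummies used in this class, and the expected shapes follow the assertions above.

import numpy as np

question_hidden_states = np.ones((2, retrieval_vector_size), dtype=np.float32)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=1)
assert retrieved_doc_embeds.shape == (2, 1, retrieval_vector_size)  # one embedding per query and doc
assert doc_ids.shape == (2, 1)  # dataset indices of the retrieved docs
assert sorted(doc_dicts[0]) == ["text", "title"]  # one dict of document columns per query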
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
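A quick illustration of the feature above; the output follows directly from encode_example(): multiple translations for one language are flattened into tuples and sorted by language code.

feature = TranslationVariableLanguages(languages=["en", "fr"])
example = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
assert example == {
    "language": ("en", "fr", "fr"),
    "translation": ("the cat", "le chat", "la chatte"),
}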
| 354 |
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
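The response computed inside the double loop above is the classic Harris measure: for the per-window structure tensor M = [[wxx, wxy], [wxy, wyy]], R = det(M) - k * trace(M)**2, with R > 0 at corners, R < 0 on edges, and R near 0 in flat regions. A minimal standalone sketch of that per-window computation:

import numpy as np


def harris_response(window_dx: np.ndarray, window_dy: np.ndarray, k: float = 0.04) -> float:
    # entries of the structure tensor, summed over the window
    wxx = (window_dx**2).sum()
    wyy = (window_dy**2).sum()
    wxy = (window_dx * window_dy).sum()
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2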
| 296 | 0 |
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 343 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 0 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
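A quick illustration of applying the decorator above; the decorated function name is just a placeholder.

@experimental
def new_tokenizer_backend():
    return "ok"


new_tokenizer_backend()  # emits a UserWarning before running the wrapped function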
| 85 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 85 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a ( unittest.TestCase ):
    def test_text_streamer(self):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ )
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case_ = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_ )
    def test_iterator_streamer(self):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = tokenizer.decode(greedy_ids[0] )
snake_case_ = TextIteratorStreamer(lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
snake_case_ = Thread(target=model.generate , kwargs=lowercase_ )
thread.start()
snake_case_ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowercase_ , lowercase_ )
    def test_text_streamer_skip_prompt(self):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = greedy_ids[:, input_ids.shape[1] :]
snake_case_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ , skip_prompt=lowercase_ )
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case_ = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_ )
    def test_text_streamer_decode_kwargs(self):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case_ = AutoTokenizer.from_pretrained('''distilgpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = torch.ones((1, 5) , device=lowercase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ , skip_special_tokens=lowercase_ )
model.generate(lowercase_ , max_new_tokens=1 , do_sample=lowercase_ , streamer=lowercase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case_ = cs.out[:-1] # Remove the final "\n"
snake_case_ = tokenizer(lowercase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout(self):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = TextIteratorStreamer(lowercase_ , timeout=0.001 )
snake_case_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
snake_case_ = Thread(target=model.generate , kwargs=lowercase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowercase_ ):
snake_case_ = ''''''
for new_text in streamer:
streamer_text += new_text
| 56 |
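The producer/consumer pattern these tests rely on, as a standalone sketch: generation runs in a background thread while the main thread consumes decoded text from the streamer. The checkpoint name is the same tiny test model used above.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
text = "".join(new_text for new_text in streamer)  # blocks until generation finishes
thread.join()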
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 316 | 0 |
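A hypothetical smoke test of the exported decoder with onnxruntime. The input name "latent_sample" and the (1, C, 25, 25) dummy shape mirror the export call above; the output directory and the 4 latent channels are assumptions, and the `return_dict` flag is assumed to have been constant-folded away during export.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("output/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
sample = session.run(None, {"latent_sample": latents})[0]
print(sample.shape)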
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 26 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 26 | 1 |
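Gnome sort walks the list like insertion sort, swapping an out-of-order element backwards one step at a time; a few sanity checks for the function above:

assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort([1]) == [1]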
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 155 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 237 | 0 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite) | 354 |
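How the regexes above behave on typical __init__.py lines (illustrative inputs only):

assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("    if not (is_torch_available() and is_transformers_available()):") == "torch_and_transformers"
assert find_backend("import os") is None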
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __SCREAMING_SNAKE_CASE :
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : int , __lowercase : Optional[Any] , __lowercase : int=13 , __lowercase : List[str]=7 , __lowercase : Dict=True , __lowercase : Tuple=False , __lowercase : Optional[Any]=99 , __lowercase : str=32 , __lowercase : List[str]=2 , __lowercase : str=4 , __lowercase : Optional[int]=37 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=40 , __lowercase : str=2 , __lowercase : List[Any]=1 , __lowercase : Optional[Any]=0 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =parent
SCREAMING_SNAKE_CASE__ : List[Any] =batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] =seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] =is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_labels
SCREAMING_SNAKE_CASE__ : str =vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE__ : List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE__ : int =num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id
SCREAMING_SNAKE_CASE__ : Any =pad_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] =bos_token_id
def __magic_name__ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : Any =tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_pegasus_inputs_dict(__lowercase , __lowercase , __lowercase )
return config, inputs_dict
def __magic_name__ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModel(config=__lowercase ).get_decoder()
SCREAMING_SNAKE_CASE__ : List[str] =inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple =input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE__ : Tuple =inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE__ : List[str] =1
# first forward pass
SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : str =ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Tuple =tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ : int =model(__lowercase , attention_mask=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Any =output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[str] =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 )
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Optional[Any]=None, ):
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : str =tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Any =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
snake_case_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
snake_case_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
snake_case_ = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
def __magic_name__ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] =TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict =ConfigTester(self , config_class=__lowercase )
def __magic_name__ ( self : int ) -> Any:
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
snake_case_ = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
snake_case_ = """google/pegasus-xsum"""
@cached_property
def __magic_name__ ( self : Optional[int] ) -> Tuple:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self : List[str] , **__lowercase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.translate_src_text(**__lowercase )
assert self.expected_text == generated_words
def __magic_name__ ( self : Optional[Any] , **__lowercase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer(self.src_text , **__lowercase , padding=__lowercase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowercase , )
SCREAMING_SNAKE_CASE__ : Any =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowercase )
return generated_words
@slow
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
self._assert_generated_batch_equal_expected() | 222 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 63 |
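An illustrative programmatic invocation of the converter defined above; all paths are placeholders.

convert_openai_checkpoint_to_pytorch(
    openai_checkpoint_folder_path="./openai-gpt-checkpoint",
    openai_config_file="",
    pytorch_dump_folder_path="./openai-gpt-pytorch",
)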
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
return dataset
| 296 | 0 |
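The public entry point built on this input stream is `Dataset.from_generator`; a minimal sketch:

from datasets import Dataset


def squares():
    for i in range(3):
        yield {"n": i, "square": i**2}


ds = Dataset.from_generator(squares)
print(ds[1])  # {'n': 1, 'square': 1}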
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(),
            )
        )

    benchmark()
| 354 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 113 | 0 |
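The recursive halving above in action; each call discards the half of the list that cannot contain the item:

assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([], 1) is False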
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 1 |
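The lazy-import pattern used in these __init__ files, reduced to its core as a simplified sketch (not transformers' actual _LazyModule, which also caches modules and supports direct submodule access): the package exposes only names at import time, and the heavy submodule is imported on first attribute access.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")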
'''simple docstring'''
# Imports
import numpy as np
class a__:
def __init__( self : Dict , __snake_case : Union[str, Any]=None , __snake_case : Tuple=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Optional[int]=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
def lowercase_ ( self : List[str] , __snake_case : str=None , __snake_case : int=None , __snake_case : List[Any]=None , __snake_case : Any=None , __snake_case : Any=None ):
if red is not None:
a : Optional[int] = red
if green is not None:
a : Optional[Any] = green
if blue is not None:
a : Tuple = blue
if red_edge is not None:
a : str = red_edge
if nir is not None:
a : List[Any] = nir
return True
def lowercase_ ( self : List[str] , __snake_case : List[str]="" , __snake_case : Dict=None , __snake_case : List[Any]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
a : int = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def lowercase_ ( self : Tuple ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowercase_ ( self : Tuple ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowercase_ ( self : Dict ):
return self.nir * (self.red / (self.green**2))
def lowercase_ ( self : Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowercase_ ( self : Any ):
return (self.nir - self.red) / (self.nir + self.red)
def lowercase_ ( self : Dict ):
return (self.nir - self.blue) / (self.nir + self.blue)
def lowercase_ ( self : Any ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowercase_ ( self : str ):
return (self.nir - self.green) / (self.nir + self.green)
def lowercase_ ( self : Optional[Any] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowercase_ ( self : List[Any] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowercase_ ( self : Optional[int] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowercase_ ( self : int ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowercase_ ( self : List[Any] , __snake_case : Tuple=0.08 , __snake_case : List[str]=1.22 , __snake_case : int=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowercase_ ( self : Any ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowercase_ ( self : int ):
return (self.nir / self.green) - 1
def lowercase_ ( self : Dict ):
return (self.nir / self.redEdge) - 1
def lowercase_ ( self : Tuple ):
return (self.red - self.blue) / self.red
def lowercase_ ( self : Tuple ):
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowercase_ ( self : Tuple ):
return self.nir - self.green
def lowercase_ ( self : int ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowercase_ ( self : Any ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def lowercase_ ( self : Dict , __snake_case : Any=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def lowercase_ ( self : str , __snake_case : Dict=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowercase_ ( self : Union[str, Any] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowercase_ ( self : Optional[Any] , __snake_case : int=None , __snake_case : List[str]=None ):
return (self.nir - b) / (a * self.red)
def lowercase_ ( self : List[str] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowercase_ ( self : Optional[Any] ):
return (self.red + self.green + self.blue) / 30.5
def lowercase_ ( self : List[Any] ):
return self.nir / self.red
def lowercase_ ( self : int ):
return (self.rvi() - 1) / (self.rvi() + 1)
def lowercase_ ( self : Any ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowercase_ ( self : int ):
return self.green / (self.nir + self.red + self.green)
def lowercase_ ( self : Tuple ):
return self.nir / (self.nir + self.red + self.green)
def lowercase_ ( self : Any ):
return self.red / (self.nir + self.red + self.green)
def lowercase_ ( self : Optional[int] ):
return (self.green - self.red) / (self.green + self.red)
def lowercase_ ( self : int ):
return (self.red - self.green) / (self.red + self.green)
def lowercase_ ( self : int ):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def lowercase_ ( self : Dict ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowercase_ ( self : Any ):
return self.nir / self.red
def lowercase_ ( self : Any ):
return (self.ndvi() + 0.5) ** (1 / 2)
def lowercase_ ( self : Optional[int] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 96 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate")
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3")
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate, typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.")
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
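    # Example launch (a sketch; paths and model name are placeholders, and any
    # extra --key=value args are forwarded to model.generate):
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-large-xsum-12-3 --save_dir xsum_generations --data_dir xsum --fp16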
run_generate()
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
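# Note: with the assignments above, `_import_structure` only names the heavy
# submodules; _LazyModule defers the actual `modeling_fnet` import until an
# attribute such as FNetModel is first accessed, keeping the top-level import cheap.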
| 114 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v^2 of a body, in joules."""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
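    # Quick sanity check (illustrative values): a 10 kg body at 2 m/s
    # carries 0.5 * 10 * 2**2 = 20 J.
    assert kinetic_energy(10, 2) == 20.0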
| 114 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
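    # Hand-checkable case: the counts follow f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)
    # with f(0) = f(1) = 1, so a row of length 5 admits 15 tilings.
    assert solution(5) == 15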
| 23 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}''' )
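# Minimal usage sketch (illustrative values, not tied to any released checkpoint):
# the rope_scaling dict is validated on construction, so malformed values fail fast.
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
#   GPTNeoXConfig(rope_scaling={"type": "linear"})  # raises ValueError: both fields are required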
| 222 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowerCamelCase_ : str = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 215 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# YOLOS does not use inputs_embeds
pass
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__A )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
# in YOLOS, the seq_len is different
a =self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
a =True
a =False
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
a =len(__A )
# Check attention is always last and order is fine
a =True
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =1
self.assertEqual(out_len + added_hidden_states , len(__A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self ) -> str:
def check_hidden_states_output(__A , __A , __A ):
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.hidden_states
a =getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
# YOLOS has a different seq_length
a =self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a =True
check_hidden_states_output(__A , __A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__A )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =YolosModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _A ( ):
"""simple docstring"""
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__A )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
a =model(inputs.pixel_values )
# verify outputs
a =torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __A )
a =torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=__A , )
a =torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __A , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __A , atol=1E-4 ) )
# verify postprocessing
a =image_processor.post_process_object_detection(
__A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
a =torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(__A )
a =[75, 75, 17, 63, 17]
a =torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(__A )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __A , atol=1E-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __A )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __A ) ) | 215 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting, followed by back substitution.
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    # Fit a polynomial through (1, y_points[0]), (2, y_points[1]), ... and return it.
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs: Matrix = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )
    return interpolated_func
def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
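    # For u(n) = 1 - n + n**2 - ... + n**10 this sums the first incorrect term
    # of each truncated interpolant (Project Euler 101); the published answer
    # is 37076114526.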
| 106 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__UpperCamelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 113 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = F'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
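# Example invocation (the script filename here is a placeholder): with the
# DialoGPT pickles small_ft.pkl etc. in the current directory,
#   python convert_dialogpt_checkpoint.py --dialogpt_path .
# writes ./DialoGPT-{small,medium,large}/pytorch_model.bin with the lm_head key renamed.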
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
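# Example invocation (the script filename and paths here are placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint --save_dir ./bigbird-pegasus-large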
| 342 | 1 |
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')
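# A matching sender for local testing could look like this (a sketch; it assumes
# a file named "File_to_send" exists beside the server script):
#
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   print(conn.recv(1024))          # the client's greeting
#   with open("File_to_send", "rb") as in_file:
#       while chunk := in_file.read(1024):
#           conn.send(chunk)
#   conn.close()                    # EOF tells the client to stop reading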
if __name__ == "__main__":
main() | 96 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arv12,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
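    # Example dispatch (a sketch; the bands are numpy arrays of equal shape):
    #   bands = np.ones((2, 2))
    #   vi = IndexCalculation(red=bands, green=bands, blue=bands, nir=2 * bands)
    #   vi.calculation("NDVI")  # (2 - 1) / (2 + 1) == 0.333... per pixel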
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 96 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 357 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 114 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["""stage2""", """stage3""", """stage4"""], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = """huggingface/label-files"""
    if "o365" in model_name:
        num_labels = 366
        filename = """object365-id2label.json"""
    else:
        num_labels = 91
        filename = """coco-detection-id2label.json"""
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""")), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # NOTE: the destination key names below were reconstructed from the DETA conversion
            # script in transformers; verify them against the current model before reusing.
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
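# Illustrative note (not from the original script): a fused `in_proj_weight` has shape
# (3 * hidden_size, hidden_size); rows [0:h], [h:2h] and [2h:3h] hold the query, key and
# value projections respectively, which is exactly how the slices above carve it up.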
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print original state dict keys and shapes (useful when debugging the conversion)
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor(
            [[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor(
            [[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 114 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Dummy object raised when `keras_nlp` is not installed; the concrete class name below is
# reconstructed from transformers' dummy-object convention.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 357 | """simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
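    # Note: the concatenated batch above is ordered [negative, positive], so after `chunk(2)`
    # in `__call__` the first half is the unconditional prediction and the second half the
    # text-conditioned one — the usual classifier-free-guidance trick.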
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
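    # In plain terms: `truncate` keeps only the smallest set of classes whose cumulative
    # probability reaches `truncation_rate` and sends every other log-probability to -inf,
    # so low-probability codebook entries can never be sampled.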
| 54 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
A_ : Dict = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't count test runs as real downloads (boolean reconstructed from the datasets conftest)
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # silence SQLAlchemy 2.0 deprecation warnings (boolean reconstructed from the datasets conftest)
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 215 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    # config attribute names below follow EfficientNetConfig, reconstructed from the conversion script
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,  # boolean reconstructed from the original conversion script
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # classifier head ("predictions" is the standard Keras name for the final Dense layer)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
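# Layout note (descriptive, not from the original file): Keras stores conv kernels as
# (H, W, in, out) while PyTorch expects (out, in, H, W) — hence permute(3, 2, 0, 1);
# depthwise kernels are (H, W, channels, 1) and map to (channels, 1, H, W) via permute(2, 3, 0, 1).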
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # keyword values below (include_top=True, None tensors) reconstructed from the conversion script
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 215 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    """Searches `pattern` in `text` using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Returns the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the index of the rightmost mismatch at this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns the matching start positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
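# For text = "ABAABA" and pattern = "AB" this prints: Pattern found in following positions: [0, 3]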
| 118 |
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
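# Quick sanity check (traced by hand, not taken from the original source): partition(2) == 2
# and partition(5) == 7, i.e. the bottom-up recurrence above computes p(m), the number of
# integer partitions of m.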
| 118 | 1 |
from scipy.stats import spearmanr
import datasets
__magic_name__: str = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
__magic_name__: List[Any] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
__magic_name__: Tuple = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 342 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Returns one longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
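# Example (traced by hand, not from the original source): longest_subsequence([1, 5, 2, 3]) -> [1, 2, 3]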
| 342 | 1 |
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (note: 0 also passes this bit trick).

    >>> [is_power_of_two(n) for n in (0, 1, 2, 3, 16)]
    [True, True, True, False, True]
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 188 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in so the type hints below still resolve without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(image: Image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
] , ) | 188 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
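# Shape note (descriptive, reconstructed): trax stores these projections per attention head,
# roughly (num_heads, d_model, d_head); transpose(1, 2) + view(-1, hidden_size) folds the
# heads back into the single 2D matrix that the PyTorch linear layers expect.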
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):  # `tuple` reconstructed from the original script
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__ : Dict =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 70 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Counts reversible numbers of the given length (digit-placement indices reconstructed)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 122 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 164 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test name
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _lowerCAmelCase ( __A ):
"""simple docstring"""
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = F"\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        ".split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Tuple = None
if self.use_token_type_ids:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :int = None
if self.use_labels:
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = LlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :str = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
UpperCamelCase__ :str = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :int = LlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :str = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
UpperCamelCase__ :Any = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
UpperCamelCase__ :Dict = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Any = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :str = True
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :Tuple = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :int = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
UpperCamelCase__ :List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase__ :Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase__ :Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :int = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :List[str] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
UpperCamelCase__ :List[Any] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
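        # The assertions above are the standard KV-cache consistency check: run
        # once over the full concatenated sequence, once incrementally with
        # `past_key_values`, then compare a random hidden-state slice. A minimal
        # sketch of the same idea (names hypothetical):
        #
        #   full = model(torch.cat([ids, next_ids], dim=-1)).last_hidden_state
        #   step = model(next_ids, past_key_values=cache).last_hidden_state
        #   assert torch.allclose(full[:, -3:], step, atol=1e-3)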
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = LlamaModelTester(self )
UpperCamelCase__ :List[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Optional[int] = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[Any] = 3
UpperCamelCase__ :Optional[Any] = input_dict['''input_ids''']
UpperCamelCase__ :Union[str, Any] = input_ids.ne(1 ).to(UpperCAmelCase__ )
UpperCamelCase__ :int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase__ :Optional[Any] = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :int = '''single_label_classification'''
UpperCamelCase__ :Union[str, Any] = input_dict['''input_ids''']
UpperCamelCase__ :int = input_ids.ne(1 ).to(UpperCAmelCase__ )
UpperCamelCase__ :Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase__ :Dict = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Union[str, Any] = 3
UpperCamelCase__ :Optional[int] = '''multi_label_classification'''
UpperCamelCase__ :List[Any] = input_dict['''input_ids''']
UpperCamelCase__ :Tuple = input_ids.ne(1 ).to(UpperCAmelCase__ )
UpperCamelCase__ :Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase__ :List[str] = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCamelCase__ :Dict = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :int = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ :List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ :Union[str, Any] = LlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
UpperCamelCase__ :Dict = original_model(UpperCAmelCase__ ).last_hidden_state
UpperCamelCase__ :Tuple = original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ :Union[str, Any] = {'''type''': scaling_type, '''factor''': 10.0}
UpperCamelCase__ :Optional[int] = LlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
UpperCamelCase__ :Dict = scaled_model(UpperCAmelCase__ ).last_hidden_state
UpperCamelCase__ :List[str] = scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
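        # For reference: `rope_scaling` is a config dict of the form
        # {"type": "linear" | "dynamic", "factor": float}. Linear scaling
        # stretches position indices at every length, so even short inputs
        # diverge from the unscaled model; dynamic scaling only activates past
        # `max_position_embeddings`, which is exactly what the branch above
        # asserts.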
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase__ :Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
UpperCamelCase__ :Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCamelCase__ :List[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase__ :List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase__ :Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
UpperCamelCase__ :str = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
UpperCamelCase__ :str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase__ :str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase__ :Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
UpperCamelCase__ :Union[str, Any] = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
UpperCamelCase__ :Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase__ :Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase__ :Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
UpperCamelCase__ :Any = model(torch.tensor(UpperCAmelCase__ ) )
UpperCamelCase__ :Union[str, Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
# fmt: off
UpperCamelCase__ :List[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        EXPECTED_TEXT_COMPLETION = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi'''
        prompt = '''Simply put, the theory of relativity states that '''
        tokenizer = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
        input_ids = tokenizer.encode(prompt , return_tensors='''pt''' )
        model = LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a__ : Union[str, Any] = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """simple docstring"""
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = True , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCAmelCase__ ) == 0:
if return_attention_mask:
__SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(UpperCAmelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "pt"
elif isinstance(UpperCAmelCase__ , (int, float, list, tuple, np.ndarray) ):
__SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(UpperCAmelCase__ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__SCREAMING_SNAKE_CASE = to_numpy(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = [to_numpy(UpperCAmelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if not all(len(UpperCAmelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__SCREAMING_SNAKE_CASE = []
for i in range(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
__SCREAMING_SNAKE_CASE = self._truncate(
UpperCAmelCase__ , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , truncation=UpperCAmelCase__ , )
truncated_inputs.append(UpperCAmelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
__SCREAMING_SNAKE_CASE = {}
for i in range(UpperCAmelCase__ ):
# padding
__SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=UpperCAmelCase__ , padding_strategy=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__SCREAMING_SNAKE_CASE = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
return BatchFeature(UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
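    # Summary of the flow above: (1) normalize raw lists/tensors to numpy,
    # (2) truncate each example independently, (3) if padding="longest",
    # resolve the target length from the longest *truncated* example, then
    # (4) pad every example to that target and rebatch into a BatchFeature.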
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> dict:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE = max_length - len(UpperCAmelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
__SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> str:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) > max_length
if needs_to_be_truncated:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=None ) -> str:
# Get padding strategy
if padding is not False:
if padding is True:
__SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = PaddingStrategy(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = padding
else:
__SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
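# Minimal usage sketch for the padding logic above (hedged: the concrete
# extractor subclass and lengths are illustrative):
#
#   feats = BatchFeature({"input_values": [np.zeros(800), np.zeros(1000)]})
#   padded = extractor.pad(feats, padding="longest", return_tensors="np")
#   # -> both sequences padded to length 1000, plus an "attention_mask" of
#   #    1s over real samples and 0s over padding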
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ):
# Initialize accelerator
A__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = args.model_name_or_path
set_seed(_lowerCamelCase )
A__, A__ = get_dataloaders(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
# Instantiate optimizer
A__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A__ = optimizer_cls(params=model.parameters() , lr=_lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
A__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
A__ = 1
A__ = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A__ = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=0 , num_training_steps=_lowerCamelCase , )
else:
A__ = DummyScheduler(_lowerCamelCase , total_num_steps=_lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__, A__, A__, A__, A__ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
A__ = 0
# We also need to keep track of the stating epoch so files are named properly
A__ = 0
A__ = evaluate.load("glue" , "mrpc" )
A__ = num_epochs
if args.partial_train_epoch is not None:
A__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A__ = args.resume_from_checkpoint.split("epoch_" )[1]
A__ = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A__ = int(_lowerCamelCase ) + 1
A__ = evaluation_loop(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.print("resumed checkpoint performance:" , _lowerCamelCase )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json" ) , "r" ) as f:
A__ = json.load(_lowerCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A__ = {}
for epoch in range(_lowerCamelCase , _lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
A__ = model(**_lowerCamelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A__ = F"epoch_{epoch}"
A__ = os.path.join(args.output_dir , _lowerCamelCase )
accelerator.save_state(_lowerCamelCase )
A__ = evaluation_loop(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = accuracy
A__ = lr_scheduler.get_lr()[0]
A__ = optimizer.param_groups[0]["lr"]
A__ = epoch
A__ = overall_step
accelerator.print(F"epoch {epoch}:" , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"state_{epoch}.json" ) , "w" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase ( ):
A__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--output_dir" , type=_lowerCamelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=_lowerCamelCase , default=_lowerCamelCase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=_lowerCamelCase , default=_lowerCamelCase , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=_lowerCamelCase , default=2 , help="Number of train epochs." , )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
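# Example invocation (hedged — paths and values are illustrative):
#
#   accelerate launch this_script.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir /tmp/ckpts \
#       --num_epochs 2 \
#       --resume_from_checkpoint /tmp/ckpts/epoch_0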
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
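        # Worked example with the defaults above: (10 // 2) ** 2 = 25 patches
        # per frame, so seq_length = 2 * 25 + 1 = 51 tokens including [CLS].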
def UpperCAmelCase_ ( self :str )-> str:
A__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self :int )-> Any:
A__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
A__ = self.num_labels
return config
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :List[str] , lowercase_ :List[Any] , lowercase_ :Tuple )-> Optional[int]:
A__ = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Tuple , lowercase_ :Tuple , lowercase_ :Dict )-> Tuple:
A__ = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ )
# verify the logits shape
A__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
A__ = self.prepare_config_and_inputs()
A__, A__, A__ = config_and_inputs
A__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase_ ( self :Union[str, Any] )-> Optional[int]:
A__ = TimesformerModelTester(self )
A__ = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :int , lowercase_ :Dict , lowercase_ :int=False )-> str:
A__ = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def UpperCAmelCase_ ( self :Union[str, Any] )-> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def UpperCAmelCase_ ( self :List[Any] )-> Tuple:
pass
def UpperCAmelCase_ ( self :Dict )-> Optional[Any]:
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCAmelCase_ ( self :Union[str, Any] )-> Dict:
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self :Dict )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self :Any )-> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> str:
if not self.has_attentions:
pass
else:
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = self.model_tester.seq_length
A__ = self.model_tester.num_frames
A__ = True
A__ = False
A__ = True
A__ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
A__ = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
A__ = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
A__ = len(lowercase_ )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
A__ = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def UpperCAmelCase_ ( self :List[Any] )-> List[str]:
def check_hidden_states_output(lowercase_ :Dict , lowercase_ :int , lowercase_ :List[Any] ):
A__ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
A__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase ( ):
A__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
A__ = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self :Optional[Any] )-> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self :int )-> Any:
A__ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
lowercase_ )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(video[:8] , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
A__ = model(**lowercase_ )
# verify the logits
A__ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
A__ = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
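    # End-to-end sketch of the inference path exercised above (hedged: the
    # label lookup is illustrative):
    #
    #   processor = VideoMAEImageProcessor(image_mean=[0.5]*3, image_std=[0.5]*3)
    #   inputs = processor(video_frames, return_tensors="pt")
    #   logits = model(**inputs).logits                 # (1, 400) for Kinetics-400
    #   label = model.config.id2label[logits.argmax(-1).item()]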
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _set_value(self, key, data):
        # each slot is a deque; new values go to the front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]
    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
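    # Minimal usage sketch (hedged: assumes the companion `hash_table` module's
    # HashTable(size_table, charge_factor=...) constructor and insert_data API):
    #
    #   ht = HashTableWithLinkedList(3, charge_factor=2)
    #   ht.insert_data(17)
    #   ht.insert_data(20)  # colliding slot (17 % 3 == 20 % 3) -> appended to
    #                       # the slot's deque, newest value first via appendleft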
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello" , "World" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"] , expected_token_type_ids )
@slow
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("sequence builders" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(
"sequence builders" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __A ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained("microsoft/deberta-base" )
SCREAMING_SNAKE_CASE_ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) for seq in encoding["input_ids"]]
# fmt: off
SCREAMING_SNAKE_CASE_ = {
"input_ids": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
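
# Illustration (not part of the test class above; names are hypothetical):
# the token/id round-trip exercised by these tests can be sketched with a
# plain dict standing in for the tokenizer vocabulary.
def _toy_convert_tokens_to_ids(vocab: dict, tokens: list, unk_id: int = 3) -> list:
    # Unknown tokens fall back to the unk id, mirroring tokenizer behaviour.
    return [vocab.get(tok, unk_id) for tok in tokens]


assert _toy_convert_tokens_to_ids({"l": 0, "o": 1, "w": 2}, ["l", "o", "w", "zz"]) == [0, 1, 2, 3]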
| 118 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__(self : Any ) -> Any:
"""simple docstring"""
return len(self.features )
def __getitem__(self : int , __UpperCAmelCase : List[Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def lowercase_ (self : Tuple ) -> Any:
"""simple docstring"""
return self.label_list
| 143 | import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2
def bamb(x):
    # Converting Bytes to Megabytes
    return int(x / 2**20)
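# Quick arithmetic check of the helper above: 5 MiB worth of raw bytes -> 5.
assert bamb(5 * 2**20) == 5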
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
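# Note on the two padding strategies above: "longest" pads each batch only to
# that batch's longest sequence (dynamic shapes), while the TPU branch pads to
# a fixed max_length so XLA does not recompile for every new batch shape.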
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
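    # Worked example of the step arithmetic above: with 320 training examples
    # and batch_size 16, len(train_dataloader) == 20; with num_epochs == 1 and
    # gradient_accumulation_steps == 2 the scheduler sees (20 * 1) // 2 == 10
    # training steps in total.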
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 143 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
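# Self-contained cross-check of the first-order entropy computed above
# (hypothetical helper, not part of the original module):
def _first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())


# Two equally likely symbols carry exactly 1 bit each:
assert _first_order_entropy("ab") == 1.0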
def main():
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 188 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 188 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __lowerCamelCase ( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
def __lowerCamelCase ( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        input_ids = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=input_ids, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
def __lowerCamelCase ( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
return model.encode(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase : Any = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase : Union[str, Any] = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__lowerCAmelCase : Optional[int] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return model.decode(
decoder_input_ids=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , encoder_outputs=_SCREAMING_SNAKE_CASE , )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase : Dict = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase : Dict = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
assert generated_txt[0].strip() == tgt_text | 182 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
assert result == expected | 182 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
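
# Sanity check from the Project Euler 29 statement: for 2 <= a <= 5 and
# 2 <= b <= 5 there are exactly 15 distinct terms.
assert solution(5) == 15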
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 164 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
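
# Example usage of the functions above: a triangle with edge weights
# 1 (v1-v2), 2 (v2-v3) and 4 (v1-v3); the MST keeps the two light edges.
def _demo_prim() -> list:
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    return prim(graph, graph[0])  # -> [(2, 1), (3, 2)]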
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
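
# Shape sketch of the masking pattern built in prepare_config_and_inputs
# above: each row attends to a random-length prefix only, e.g. for
# start_index == 3 of 5 positions the mask row is [1, 1, 1, 0, 0].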
| 94 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
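
# Examples (semantics: can `a` be turned into the abbreviation `b` by
# upper-casing some of its lowercase letters and deleting the rest?):
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABCD") is False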
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | 1 |
from collections.abc import Callable
class Heap:
    """A generic Heap class, can be used as min or max by passing the key function accordingly."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    pass
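
# Quick usage check of the Heap above (default identity key -> max-heap;
# the pos_map index is what makes update_item/delete_item O(log n)):
_h = Heap()
_h.insert_item(5, 34)
_h.insert_item(6, 31)
_h.insert_item(7, 37)
assert _h.get_top() == [7, 37]
assert _h.extract_top() == [7, 37] and _h.get_top() == [5, 34]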
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__snake_case : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
__snake_case : List[str] = F'[unused{i}]'
__snake_case : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__snake_case : Any = 12
__snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
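
# Layout produced by build_inputs_with_special_tokens above (worked example
# with sep_token_id == 2):
#   single sequence [5, 6]   -> [5, 6, 2]
#   pair [5, 6] and [7]      -> [5, 6, 2, 7, 2]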
| 123 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
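
# Example: the longest non-decreasing subsequence of [5, 1, 2, 3] is [1, 2, 3].
assert longest_subsequence([5, 1, 2, 3]) == [1, 2, 3]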
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
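
# The availability-guard pattern above in miniature (illustrative only):
# optional backends are probed at import time and stub objects are exported
# when they are missing, so importing the package never hard-fails.
def _probe_optional(module_name: str):
    try:
        return __import__(module_name)
    except ImportError:
        return None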
| 173 | 0 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 143 |
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # rebuild the pre-tokenizer if `add_prefix_space` differs from the serialized state
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
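# Minimal usage sketch (assumes the `transformers` package and access to the
# Hub; the checkpoint name comes from the map above, and the exact ids depend
# on the downloaded vocabulary):
#
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     tok("Hello world!")["input_ids"]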
| 143 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
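# Usage sketch (assumes `sentencepiece`, `transformers` and Hub access; the
# model name comes from the map above):
#
#     tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     tok("Un exemple en français.")["input_ids"]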
| 357 |
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matching_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matching_data = json.dumps(matching_data)
    matching_data = json.loads(matching_data)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matching_data,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # the URLs arrive double-escaped in the page source
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
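# Programmatic usage sketch (hypothetical query; the scraper parses Google's
# current result markup, so it can break or be rate-limited at any time):
#
#     count = download_images_from_google_query("sunset", max_images=3)
#     print(count)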
| 81 | 0 |
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` with the given stride and keep the maximum."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` with the given stride and keep the average."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image ('path_to_image' is a placeholder; the image must be square)
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
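# Worked example with size=2 and stride=2 on a 4x4 input:
#
#     >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#     array([[ 6.,  8.],
#            [14., 16.]])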
| 182 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
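# Effect of the lazy layout (sketch): importing the package itself is cheap,
# and the torch-heavy modeling file is only loaded when one of its names is
# first accessed:
#
#     from transformers.models.glpn import GLPNConfig              # config module only
#     from transformers.models.glpn import GLPNForDepthEstimation  # now loads modeling_glpn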
| 182 | 1 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial directly, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme, which needs only one
    multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients from lowest to highest degree
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
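# Hand check: the tuple above encodes f(x) = 7.0*x**4 + 9.3*x**3 + 5.0*x**2, so
# f(10.0) = 70000.0 + 9300.0 + 500.0 = 79800.0; both calls print 79800.0.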
| 355 |
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R): pass exactly one of the three values as 0
    and the function returns that missing quantity."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
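# Worked example: a 2 A current through a 6 Ω resistor needs 12 V:
#
#     >>> ohms_law(voltage=0, current=2, resistance=6)
#     {'voltage': 12.0}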
| 141 | 0 |