"""Integration tests for the `datasets` inspection utilities."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Optional[int] ):
return EnvironmentCommand()
def lowercase__ ( snake_case_ :List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( _lowerCAmelCase ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ):
__UpperCAmelCase = accelerate_config_file
def a ( self : Dict ):
__UpperCAmelCase = '''not installed'''
if is_safetensors_available():
import safetensors
__UpperCAmelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = __UpperCAmelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase ):
__UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCAmelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else F'''\t{accelerate_config}'''
)
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_torch_available():
import torch
__UpperCAmelCase = torch.__version__
__UpperCAmelCase = torch.cuda.is_available()
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = tf.__version__
try:
# deprecated in v2.1
__UpperCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) )
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCAmelCase = flax.__version__
__UpperCAmelCase = jax.__version__
__UpperCAmelCase = jaxlib.__version__
__UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform
__UpperCAmelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def a ( _lowercase : str ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
"""Project Euler problem 36: sum the numbers below a limit that are palindromic
in both base 10 and base 2."""
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
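
# --- Illustrative check (not in the original file) ----------------------------
# Below 10, the numbers palindromic in both base 10 and base 2 are 1, 3, 5, 7
# and 9 (binary 1, 11, 101, 111, 1001), so the partial sum is easy to verify.
if __name__ == "__main__":
    assert solution(10) == 1 + 3 + 5 + 7 + 9 == 25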
"""Convert a length between metric units, from meter up to yottametre."""

UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
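
# --- Illustrative check (not in the original file) ----------------------------
# "kilometer" maps to exponent 3 and "meter" to exponent 0, so the conversion
# factor is 10 ** 3.
if __name__ == "__main__":
    assert length_conversion(4, "kilometer", "meter") == 4000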
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Dataset and iterator helpers used by the transformers pipelines to batch,
unbatch and chunk inputs on the PyTorch side."""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""Compute the Shannon entropy of single characters and character pairs in a text."""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case )
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(1 ,texta_length + 1 ):
for j in range(1 ,texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ : List[Any] = i
SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
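
# --- Illustrative check (not in the original file) ----------------------------
# "abcd" (length 4) is shared by both strings and beats "xyz" (length 3).
if __name__ == "__main__":
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"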
"""Reader and writer that move `datasets` datasets to and from Parquet files."""
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
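
# --- Illustrative usage (not in the original file) ----------------------------
# These classes back the public `Dataset.to_parquet` / `Dataset.from_parquet`
# helpers; a minimal round trip (the output file name is an assumption):
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_parquet("example.parquet")                    # wraps ParquetDatasetWriter
    reloaded = Dataset.from_parquet("example.parquet")  # wraps ParquetDatasetReader
    assert reloaded.column_names == ["text", "label"]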
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class a ( lowerCAmelCase_ ):
_snake_case : Optional[int] = 'beit'
def __init__( self : Optional[Any] , __lowerCAmelCase : Any=8192 , __lowerCAmelCase : Tuple=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[int]=12 , __lowerCAmelCase : Union[str, Any]=3072 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : int=1e-1_2 , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Union[str, Any]=16 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[int]=[3, 5, 7, 11] , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[int]=0.4 , __lowerCAmelCase : List[Any]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[Any]=255 , **__lowerCAmelCase : List[str] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : Tuple ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return 1e-4
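
# --- Illustrative usage (not in the original file) ----------------------------
# Instantiating the config with its defaults; the printed values follow
# directly from the keyword defaults above.
if __name__ == "__main__":
    from transformers import BeitConfig  # same class as defined above

    configuration = BeitConfig()
    print(configuration.hidden_size, configuration.num_hidden_layers)  # 768 12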
"""EfficientNet model configuration."""
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
"""Build a custom knowledge dataset for RAG: split documents into passages,
embed them with a DPR context encoder, and index the embeddings with Faiss."""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
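
# --- Illustrative invocation (assumed, not from the source) -------------------
# The script name and paths below are placeholders:
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/output_dir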
"""BertGeneration model configuration."""
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
"""Approximate the geodesic distance on Earth's ellipsoid with Lambert's formula."""
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
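
# --- Illustrative usage (not in the original file) ----------------------------
# Returns the distance in meters; the coordinates below (roughly San Francisco
# and Yosemite) are an assumption, and no exact value is asserted since the
# result depends on the haversine helper.
if __name__ == "__main__":
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*san_francisco, *yosemite))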
"""Download a publicly accessible Instagram video/IGTV via downloadgram.net."""
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ : int = logging.get_logger(__name__)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = to_pil_image(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pil_image.size
__SCREAMING_SNAKE_CASE = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type="dict" , config=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
__SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(lowerCAmelCase_ ) if not word.strip()]
__SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_ )
# finally, normalize the bounding boxes
__SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    # De-obfuscated reconstruction: names and structure restored to match the
    # LayoutLMv3 image processor in `transformers` (resize + rescale +
    # normalize + optional Tesseract OCR).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize `image` to (size["height"], size["width"]).
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Rescale pixel values by `scale` (typically 1/255).
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Normalize `image` with the given per-channel mean and standard deviation.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
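
# --- Added usage sketch (not part of the original file) -------------------------
# A minimal, hedged example of driving the processor above; assumes Pillow is
# installed and skips OCR so pytesseract is not required.
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    image = Image.new("RGB", (640, 480), color="white")
    encoding = processor.preprocess(image, return_tensors="np")
    print(encoding["pixel_values"].shape)  # expected: (1, 3, 224, 224)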
| 355 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a__ : Dict = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a__ : Any = TaTokenizerFast
a__ : Tuple = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a__ : str = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
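
# --- Added usage note ------------------------------------------------------------
# With the lazy-module pattern above, heavy framework imports only happen on first
# attribute access; consumers simply write, e.g.:
#
#   from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#   tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")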
| 195 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_a = 50_00_00
_a , _a = os.path.split(__file__)
_a = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _A ( UpperCamelCase_ : datasets.Dataset, **UpperCamelCase_ : Tuple) -> Optional[Any]:
'''simple docstring'''
__lowercase = dataset.map(**UpperCamelCase_)
@get_duration
def _A ( UpperCamelCase_ : datasets.Dataset, **UpperCamelCase_ : str) -> Tuple:
'''simple docstring'''
__lowercase = dataset.filter(**UpperCamelCase_)
def _A ( ) -> Optional[int]:
'''simple docstring'''
__lowercase = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
__lowercase = generate_example_dataset(
os.path.join(UpperCamelCase_, "dataset.arrow"), UpperCamelCase_, num_examples=UpperCamelCase_)
__lowercase = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=UpperCamelCase_)
def tokenize(UpperCamelCase_ : int):
return tokenizer(examples["text"])
__lowercase = map(UpperCamelCase_)
__lowercase = map(UpperCamelCase_, batched=UpperCamelCase_)
__lowercase = map(UpperCamelCase_, function=lambda UpperCamelCase_: None, batched=UpperCamelCase_)
with dataset.formatted_as(type="numpy"):
__lowercase = map(UpperCamelCase_, function=lambda UpperCamelCase_: None, batched=UpperCamelCase_)
with dataset.formatted_as(type="pandas"):
__lowercase = map(UpperCamelCase_, function=lambda UpperCamelCase_: None, batched=UpperCamelCase_)
with dataset.formatted_as(type="torch", columns="numbers"):
__lowercase = map(UpperCamelCase_, function=lambda UpperCamelCase_: None, batched=UpperCamelCase_)
with dataset.formatted_as(type="tensorflow", columns="numbers"):
__lowercase = map(UpperCamelCase_, function=lambda UpperCamelCase_: None, batched=UpperCamelCase_)
__lowercase = map(UpperCamelCase_, function=UpperCamelCase_, batched=UpperCamelCase_)
__lowercase = filter(UpperCamelCase_)
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(UpperCamelCase_, "wb") as f:
f.write(json.dumps(UpperCamelCase_).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
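
# --- Added sketch (assumption, not the original `utils.get_duration`) ------------
# The benchmark imports `get_duration` from a local `utils` module that is not
# shown here; a plausible minimal implementation just times the wrapped call:
import time
from functools import wraps


def _example_get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, used as the benchmark value

    return wrapper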
| 17 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """
    Return the sum of the squares of the digits of ``number``.
    >>> next_number(44)
    32
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
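
# --- Added worked example ----------------------------------------------------------
# Chain for 44: 44 -> 32 -> 13 -> 10 -> 1, so chain(44) is True;
# chain for 85: 85 -> 89, so chain(85) is False and 85 counts toward solution().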
| 80 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.
    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raise if ``point`` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Version with a one-liner summation.
    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
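
# --- Added note ----------------------------------------------------------------------
# Both implementations are O(n) in the number of dimensions; e.g. the distance
# between (1, 1) and (9, 9) is |1 - 9| + |1 - 9| = 16.0.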
| 349 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_student))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
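
# --- Added usage sketch --------------------------------------------------------------
# `fire` exposes the function above as a CLI; an illustrative (hedged) call that
# builds a 12-encoder / 3-decoder student from a BART teacher:
#
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3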
| 349 | 1 |
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """
    Function to change contrast
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """
        Fundamental Transformation/Operation that'll be performed on
        every bit.
        """
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
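
# --- Added note ------------------------------------------------------------------------
# The factor formula is the standard 8-bit contrast correction: at level=170,
# factor = (259 * 425) / (255 * 89) ~= 4.85, so mid-gray (128) stays fixed while
# values away from 128 are pushed toward 0 or 255.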
| 83 |
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
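
# --- Added usage sketch ---------------------------------------------------------------
# The per-step pattern exercised above (scale input -> predict residual -> step)
# is the same loop a sampling pipeline would run; a hedged, model-free sketch:
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40)
#   scheduler.set_timesteps(2)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       residual = model(model_input, t)          # `model` is your denoiser
#       sample = scheduler.step(residual, t, sample).prev_sample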
| 244 | 0 |
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable, 0 = blocked); every move costs 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
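
# --- Added example ----------------------------------------------------------------------
# On np.ones((3, 3)) with source (0, 0) and destination (2, 2):
#   dijkstra(np.ones((3, 3)), (0, 0), (2, 2), allow_diagonal=False) -> (4.0, L-shaped path)
#   dijkstra(np.ones((3, 3)), (0, 0), (2, 2), allow_diagonal=True)  -> (2.0, diagonal path)
# (every step costs 1, including diagonal steps, per the implementation above)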
| 369 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra 5-way classification head (answer category)
    on top of the pooled output.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
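
# --- Added wiring sketch (hedged; dataset/tokenizer loading omitted) -------------------
# How the pieces above are typically assembled; names outside this file
# (`tokenizer`, `tr_dataset`, `val_dataset`, `num_train_steps`) are assumptions:
#
#   args = Args()
#   model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id, block_size=args.block_size)
#   tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                     num_train_steps=num_train_steps, weight_decay=args.weight_decay)
#   trainer = Trainer(args=args, data_collator=DataCollator(pad_id=tokenizer.pad_token_id),
#                     train_step_fn=train_step, val_step_fn=val_step,
#                     model_save_fn=model.save_pretrained, logger=wandb, scheduler_fn=lr)
#   state = trainer.create_state(model, tx, num_train_steps)
#   trainer.train(state, tr_dataset, val_dataset)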
| 179 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class UpperCamelCase ( lowercase_ ):
lowercase = '''dpr'''
def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase="absolute" ,__UpperCamelCase = 0 ,**__UpperCamelCase ,) -> int:
'''simple docstring'''
super().__init__(pad_token_id=A__ ,**A__ )
lowercase_ : Any = vocab_size
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Tuple = num_attention_heads
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : List[str] = intermediate_size
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = type_vocab_size
lowercase_ : List[str] = initializer_range
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : List[str] = projection_dim
lowercase_ : Optional[int] = position_embedding_type
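
# --- Added usage sketch -----------------------------------------------------------------
# Hedged example of instantiating the config above (values are the defaults
# unless overridden):
#
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768 and config.projection_dim == 128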
| 213 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
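
# --- Added usage sketch -----------------------------------------------------------------
# Illustrative invocation of this script (paths are placeholders, not from the original):
#
#   python <this_script>.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path ./gpt2-pytorch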
| 192 | 0 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__SCREAMING_SNAKE_CASE : List[str] = 'src/diffusers'
__SCREAMING_SNAKE_CASE : List[Any] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE : List[Any] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
__SCREAMING_SNAKE_CASE : Any = spec.loader.load_module()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , lowerCAmelCase__ ) is not None
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = object_name.split(""".""" )
snake_case_ = 0
# First let's find the module where our object lives.
snake_case_ = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) ):
i += 1
if i < len(lowerCAmelCase__ ):
snake_case_ = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ = f.readlines()
# Now let's find the class / func in the code!
snake_case_ = ''''''
snake_case_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
snake_case_ = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
snake_case_ = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'<FILL\s+[^>]*>')
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = code.split("""\n""" )
snake_case_ = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
snake_case_ = f"""class Bla:\n{code}"""
snake_case_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
snake_case_ = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
snake_case_ = style_docstrings_in_code(lowerCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ = f.readlines()
snake_case_ = []
snake_case_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
snake_case_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
snake_case_ = search.groups()
snake_case_ = find_code_in_diffusers(lowerCAmelCase__ )
snake_case_ = get_indent(lowerCAmelCase__ )
snake_case_ = line_index + 1 if indent == theoretical_indent else line_index + 2
snake_case_ = theoretical_indent
snake_case_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
snake_case_ = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
snake_case_ = lines[line_index]
snake_case_ = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
snake_case_ = lines[start_index:line_index]
snake_case_ = ''''''.join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
snake_case_ = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
snake_case_ = '''\n'''.join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
snake_case_ = replace_pattern.replace("""with""" , """""" ).split(""",""" )
snake_case_ = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
snake_case_ = pattern.groups()
snake_case_ = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
snake_case_ = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
snake_case_ = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
snake_case_ = blackify(lines[start_index - 1] + theoretical_code )
snake_case_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
snake_case_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
snake_case_ = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def _a ( _SCREAMING_SNAKE_CASE = False ) -> Dict:
snake_case_ = glob.glob(os.path.join(lowerCAmelCase__ , """**/*.py""" ) , recursive=lowerCAmelCase__ )
snake_case_ = []
for filename in all_files:
snake_case_ = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
snake_case_ = '''\n'''.join(lowerCAmelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
check_copies(args.fix_and_overwrite)
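
# --- Added note ---------------------------------------------------------------------------
# The checker keys off comments of the form
#   # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Custom
# i.e. a dotted object path, optionally followed by `Old->New` replace patterns
# (comma-separated; append ` all-casing` to also replace lower/upper-cased forms),
# matching the `_re_copy_warning` and `_re_replace_pattern` regexes above.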
| 352 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __A (snake_case__):
'''simple docstring'''
__lowercase: Any = ["""image_processor""", """tokenizer"""]
__lowercase: Dict = """AutoImageProcessor"""
__lowercase: List[Any] = """AutoTokenizer"""
def __init__( self : Tuple , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase_ , )
snake_case_ = kwargs.pop("""feature_extractor""" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self.image_processor
snake_case_ = False
def __call__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = kwargs.pop("""images""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""text""" , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
snake_case_ = args[0]
snake_case_ = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
snake_case_ = self.image_processor(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None:
snake_case_ = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case_ = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ) ->Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->str:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@contextmanager
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
snake_case_ = True
snake_case_ = self.tokenizer
yield
snake_case_ = self.image_processor
snake_case_ = False
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : str=None ) ->Tuple:
"""simple docstring"""
if added_vocab is None:
snake_case_ = self.tokenizer.get_added_vocab()
snake_case_ = {}
while tokens:
snake_case_ = re.search(R"""<s_(.*?)>""" , UpperCAmelCase_ , re.IGNORECASE )
if start_token is None:
break
snake_case_ = start_token.group(1 )
snake_case_ = re.search(RF"""</s_{key}>""" , UpperCAmelCase_ , re.IGNORECASE )
snake_case_ = start_token.group()
if end_token is None:
snake_case_ = tokens.replace(UpperCAmelCase_ , """""" )
else:
snake_case_ = end_token.group()
snake_case_ = re.escape(UpperCAmelCase_ )
snake_case_ = re.escape(UpperCAmelCase_ )
snake_case_ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase_ , re.IGNORECASE )
if content is not None:
snake_case_ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case_ = self.tokenajson(UpperCAmelCase_ , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if value:
if len(UpperCAmelCase_ ) == 1:
snake_case_ = value[0]
snake_case_ = value
else: # leaf nodes
snake_case_ = []
for leaf in content.split(R"""<sep/>""" ):
snake_case_ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case_ = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase_ )
if len(output[key] ) == 1:
snake_case_ = output[key][0]
snake_case_ = tokens[tokens.find(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if len(UpperCAmelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase_ , )
return self.image_processor
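
# --- Added example --------------------------------------------------------------------------
# `token2json` turns Donut's XML-like generations into nested Python objects.
# A hedged illustration (tag names are made up):
#
#   "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#   -> {"menu": {"name": "latte", "price": "4.50"}}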
| 233 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( a_ ):
'''simple docstring'''
A = (DEISMultistepScheduler,)
A = (("num_inference_steps", 2_5),)
def a_ (self , **_UpperCAmelCase ) -> List[Any]:
__UpperCamelCase : Dict = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**_A )
return config
def a_ (self , _UpperCAmelCase=0 , **_UpperCAmelCase ) -> Any:
__UpperCamelCase : Any = dict(self.forward_default_kwargs )
__UpperCamelCase : Optional[int] = kwargs.pop("num_inference_steps" , _A )
__UpperCamelCase : Union[str, Any] = self.dummy_sample
__UpperCamelCase : Optional[int] = 0.1 * sample
__UpperCamelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase : Dict = self.get_scheduler_config(**_A )
__UpperCamelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
__UpperCamelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__UpperCamelCase : Dict = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
__UpperCamelCase : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase , __UpperCamelCase : Dict = sample, sample
for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
__UpperCamelCase : Optional[int] = scheduler.step(_A , _A , _A , **_A ).prev_sample
__UpperCamelCase : List[str] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a_ (self ) -> Optional[int]:
pass
def a_ (self , _UpperCAmelCase=0 , **_UpperCAmelCase ) -> Tuple:
__UpperCamelCase : Union[str, Any] = dict(self.forward_default_kwargs )
__UpperCamelCase : Dict = kwargs.pop("num_inference_steps" , _A )
__UpperCamelCase : Optional[Any] = self.dummy_sample
__UpperCamelCase : Optional[Any] = 0.1 * sample
__UpperCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase : Optional[int] = self.get_scheduler_config()
__UpperCamelCase : Dict = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__UpperCamelCase : Tuple = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase : Dict = scheduler.step(_A , _A , _A , **_A ).prev_sample
__UpperCamelCase : Any = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a_ (self , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
if scheduler is None:
__UpperCamelCase : Dict = self.scheduler_classes[0]
__UpperCamelCase : List[str] = self.get_scheduler_config(**_A )
__UpperCamelCase : Dict = scheduler_class(**_A )
__UpperCamelCase : Any = self.scheduler_classes[0]
__UpperCamelCase : int = self.get_scheduler_config(**_A )
__UpperCamelCase : Tuple = scheduler_class(**_A )
__UpperCamelCase : List[Any] = 1_0
__UpperCamelCase : str = self.dummy_model()
__UpperCamelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase : Tuple = model(_A , _A )
__UpperCamelCase : Dict = scheduler.step(_A , _A , _A ).prev_sample
return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # half-precision input must stay half precision through the loop
        assert sample.dtype == torch.float16
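# --- Standalone sketch (not part of the suite above) of the config round-trip the
# checks exercise; assumes a recent `diffusers` release shipping DEISMultistepScheduler.
import tempfile

import torch
from diffusers import DEISMultistepScheduler

sketch_scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
sketch_scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as sketch_dir:
    sketch_scheduler.save_config(sketch_dir)  # writes scheduler_config.json
    reloaded = DEISMultistepScheduler.from_pretrained(sketch_dir)
reloaded.set_timesteps(10)
assert torch.equal(reloaded.timesteps, sketch_scheduler.timesteps)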
| 298 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class A_ (unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = min_seq_length
UpperCAmelCase = max_seq_length
UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase = feature_size
UpperCAmelCase = padding_value
UpperCAmelCase = sampling_rate
UpperCAmelCase = do_normalize
UpperCAmelCase = num_mel_bins
UpperCAmelCase = hop_length
UpperCAmelCase = win_length
UpperCAmelCase = win_function
UpperCAmelCase = fmin
UpperCAmelCase = fmax
UpperCAmelCase = mel_floor
UpperCAmelCase = return_attention_mask
def _lowercase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
if equal_length:
UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowercase ( self ):
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(_A )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
def _lowercase ( self ):
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
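# --- Usage sketch for reference, outside the test harness. The class is imported above
# as `SpeechTaFeatureExtractor`; upstream transformers spells it `SpeechT5FeatureExtractor`.
# This shows the raw-waveform path with fake audio data.
sketch_extractor = SpeechTaFeatureExtractor()
sketch_waveform = np.random.rand(16000).astype(np.float32)  # one second of fake mono audio
sketch_inputs = sketch_extractor(sketch_waveform, sampling_rate=16000, return_tensors="np")
print(sketch_inputs.input_values.shape)  # (1, 16000)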
| 273 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu",
        d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=0, scale_embedding=False,
        pad_token_id=0, eos_token_id=1, forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
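# --- Usage sketch: `hidden_size` and `num_attention_heads` are routed through
# `attribute_map` and the properties above rather than stored directly.
sketch_config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
print(sketch_config.hidden_size)  # 512
print(sketch_config.num_attention_heads)  # 16 (the encoder default)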
| 361 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"
    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16,
        image_size=288, initializer_factor=1, layer_norm_eps=1e-05,
        stop_gradient=False, share_layernorm=True, remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # this is the vision sub-config, so pull the "vision_config" entry
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"
    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        initializer_factor=1, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        position_embedding_type="absolute", use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"
    def __init__(
        self,
        share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768,
        initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False,
        link_tower_type="add", num_attention_heads=12, num_hidden_layers=6,
        tie_word_embeddings=False, init_layernorm_from_vision_encoder=False,
        text_config=None, vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
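# --- Composition sketch: building the joint config from the two sub-configs above.
sketch_text = BridgeTowerTextConfig(vocab_size=50265)
sketch_vision = BridgeTowerVisionConfig(image_size=288)
sketch_config = BridgeTowerConfig.from_text_vision_configs(sketch_text, sketch_vision)
print(sketch_config.text_config.vocab_size, sketch_config.vision_config.image_size)  # 50265 288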
| 121 | 0 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0,
        do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16,
        win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0,
        fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10,
        reduction_factor: int = 2, return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin, max_frequency=self.fmax,
            sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
def __call__( self , __A = None , __A = None , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , __A = None , **__A , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
lowerCAmelCase_ :List[str] = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
else:
lowerCAmelCase_ :List[str] = None
if audio_target is not None:
lowerCAmelCase_ :int = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase_ :Any = inputs_target["""input_values"""]
lowerCAmelCase_ :Dict = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase_ :Union[str, Any] = decoder_attention_mask
return inputs
def __lowerCAmelCase ( self , __A , __A = False , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , **__A , ) -> BatchFeature:
lowerCAmelCase_ :List[str] = isinstance(__A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCAmelCase_ :int = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ :Optional[Any] = [np.asarray(__A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
lowerCAmelCase_ :str = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ :List[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ :Tuple = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase_ :Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase_ :str = [self._extract_mel_features(__A ) for waveform in speech]
lowerCAmelCase_ :int = BatchFeature({"""input_values""": features} )
lowerCAmelCase_ :Optional[int] = self.num_mel_bins
else:
lowerCAmelCase_ :Optional[int] = BatchFeature({"""input_values""": speech} )
lowerCAmelCase_ :List[Any] = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
lowerCAmelCase_ :Any = feature_size_hack
# convert input values to correct format
lowerCAmelCase_ :Optional[int] = padded_inputs["""input_values"""]
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase_ :Any = [np.asarray(__A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(__A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase_ :Any = [array.astype(np.floataa ) for array in input_values]
elif isinstance(__A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ :Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase_ :Any = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCAmelCase_ :Optional[int] = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase_ :Any = (
attention_mask
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase_ :Dict = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] , attention_mask=__A , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase_ :str = padded_inputs.convert_to_tensors(__A )
return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
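# --- Usage sketch for the target branch: `audio_target` yields log-mel frames rather
# than padded waveforms (16 kHz input assumed; the shape comment is illustrative).
import numpy as np

sketch_extractor = SpeechT5FeatureExtractor()
sketch_waveform = np.random.rand(16000).astype(np.float32)
sketch_targets = sketch_extractor(audio_target=sketch_waveform, sampling_rate=16000, return_tensors="np")
print(sketch_targets.input_values.shape)  # (1, num_frames, 80) -- 80 mel bins by default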
| 84 |
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0

    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
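# Example usage (textbook pair):
print(hamming_distance("karolin", "kathrin"))  # 3
print(hamming_distance("00000", "11111"))  # 5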
| 84 | 1 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # `nums` must be sorted in ascending order for the pointer walk to be valid.
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
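# Extra worked calls; note again that the pointer walk assumes sorted input.
print(two_pointer([2, 7, 11, 15], 18))  # [1, 2] because 7 + 11 == 18
print(two_pointer([2, 7, 11, 15], 100))  # [] -- no pair reaches the target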
| 354 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: list[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: list):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
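# --- Round-trip sketch for the two module-level helpers above
# (torch and PIL must be available, as guarded by the imports at the top).
sketch_inputs = create_inputs(["text", "audio"])
print(output_types(sketch_inputs))  # ['text', 'audio']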
| 10 | 0 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
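# Worked example on the classic 4x4 grid (LeetCode 1351): both strategies agree.
small_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_binary_search(small_grid))  # 8
print(count_negatives_brute_force(small_grid))  # 8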
| 234 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def __lowerCAmelCase (__lowerCAmelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCAmelCase : str = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
_UpperCAmelCase : List[str] = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
_UpperCAmelCase : Dict = name.replace("patch_embed" , "patch_embeddings" )
if "pos_embed" in name:
_UpperCAmelCase : int = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
_UpperCAmelCase : int = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
_UpperCAmelCase : int = name.replace("proj" , "projection" )
if "blocks" in name:
_UpperCAmelCase : Tuple = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name:
_UpperCAmelCase : Optional[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase : int = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
_UpperCAmelCase : List[Any] = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
_UpperCAmelCase : List[str] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
_UpperCAmelCase : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
_UpperCAmelCase : int = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
_UpperCAmelCase : Tuple = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
_UpperCAmelCase : Optional[Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCAmelCase : List[str] = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_UpperCAmelCase : Tuple = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
_UpperCAmelCase : Optional[int] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
_UpperCAmelCase : Optional[int] = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
_UpperCAmelCase : Optional[Any] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
_UpperCAmelCase : Optional[Any] = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCAmelCase : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCAmelCase : Optional[Any] = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCAmelCase : Dict = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCAmelCase : int = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCAmelCase : int = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
_UpperCAmelCase : Dict = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
_UpperCAmelCase : Tuple = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
_UpperCAmelCase : Optional[int] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
_UpperCAmelCase : Dict = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
_UpperCAmelCase : List[str] = name.replace("pretrained" , "dpt" )
if "bn" in name:
_UpperCAmelCase : Dict = name.replace("bn" , "batch_norm" )
if "head" in name:
_UpperCAmelCase : Tuple = name.replace("head" , "head.head" )
if "encoder.norm" in name:
_UpperCAmelCase : Optional[Any] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
_UpperCAmelCase : Dict = name.replace("auxlayer" , "auxiliary_head.head" )
return name
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : int = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_UpperCAmelCase : str = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[: config.hidden_size, :]
_UpperCAmelCase : Dict = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : str = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
_UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
_UpperCAmelCase : Tuple = state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
_UpperCAmelCase : Any = DPTForSemanticSegmentation(__lowerCAmelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
_UpperCAmelCase : Any = 480 if "ade" in checkpoint_url else 384
_UpperCAmelCase : List[str] = DPTImageProcessor(size=__lowerCAmelCase )
_UpperCAmelCase : Any = prepare_img()
_UpperCAmelCase : Dict = image_processor(__lowerCAmelCase , return_tensors="pt" )
# forward pass
_UpperCAmelCase : Tuple = model(**__lowerCAmelCase ).logits if "ade" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
# Assert logits
_UpperCAmelCase : Dict = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
if "ade" in checkpoint_url:
_UpperCAmelCase : str = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
assert outputs.shape == torch.Size(__lowerCAmelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __lowerCAmelCase )
)
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print("Pushing model to hub..." )
model.push_to_hub(
repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCamelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
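# --- Invocation sketch. The checkpoint URL is the script's own default; the script
# filename and the dump path below are hypothetical.
#
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large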
| 234 | 1 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advances the board by one generation and returns the new board."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.

    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__SCREAMING_SNAKE_CASE :Optional[Any] = int(sys.argv[1])
# main working structure of this module.
__SCREAMING_SNAKE_CASE :Any = create_canvas(canvas_size)
seed(c)
__SCREAMING_SNAKE_CASE :Any = plt.subplots()
fig.show()
__SCREAMING_SNAKE_CASE :Tuple = ListedColormap(['''w''', '''k'''])
try:
while True:
__SCREAMING_SNAKE_CASE :Tuple = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
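
# Hedged sanity check (added for illustration; not part of the original module).
# Call it by hand to verify two of the rules encoded in __judge_point above.
def _demo_judge_point() -> None:
    lone = [[False, False, False], [False, True, False], [False, False, False]]
    assert __judge_point(True, lone) is False  # under-population: < 2 live neighbours
    birth = [[True, True, True], [False, False, False], [False, False, False]]
    assert __judge_point(False, birth) is True  # reproduction: exactly 3 live neighbours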
| 356 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(F"{solution() = }")
| 156 | 0 |
'''simple docstring'''
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
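# Hedged usage sketch (added; the file name and local paths are assumptions):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava.pt --codebook_path ./dalle_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full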
 | 55 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
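# Note (added): with the lazy structure above, importing the package stays cheap;
# a submodule such as `modeling_clap` is only loaded the first time one of its
# attributes (e.g. `ClapModel`) is actually accessed.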
| 67 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
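# Minimal usage sketch (added; values are the defaults declared above):
# config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
# config.hidden_size  # 64 * 2**3 == 512, the channel dim after the last stage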
| 364 |
"""simple docstring"""
import os
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
with open(os.path.dirname(__UpperCamelCase ) + "/grid.txt" ) as f:
lowerCAmelCase_ : str = [] # noqa: E741
for _ in range(20 ):
l.append([int(__UpperCamelCase ) for x in f.readline().split()] )
lowerCAmelCase_ : Dict = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCAmelCase_ : Optional[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCAmelCase_ : Dict = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCAmelCase_ : Union[str, Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCAmelCase_ : List[str] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCAmelCase_ : Optional[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCAmelCase_ : List[Any] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCAmelCase_ : str = temp
return maximum
if __name__ == "__main__":
print(solution())
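# Note (added): the four scans cover right, down and the two diagonals, so every
# four-cell product in the 20x20 grid is examined exactly once.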
| 161 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0

        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
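# Illustrative check (added): the first Fibonacci number with three digits is
# F(12) = 144, so solution(3) == 12.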
| 89 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> None:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
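# Hedged usage sketch mirroring what the tests above exercise (the checkpoint
# name is a public CLIPSeg model, given here only for illustration):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")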
| 95 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
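# Hedged usage sketch (added; the checkpoint comes from the map above):
# tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# tokenizer("lower newer").input_ids  # ids wrapped in [CLS] ... [SEP]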
| 44 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
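# Worked example of the rolling-hash update above (added for illustration), with
# alphabet_size = 256 and modulus = 1000003, sliding the window "ab" -> "bc" in "abc":
#   h("ab") = ord("a") * 256 + ord("b") = 24930
#   h("bc") = (24930 - ord("a") * 256) * 256 + ord("c") = 25187 == ord("b") * 256 + ord("c")
# i.e. drop the leading character, shift by the base, append the trailing one: O(1).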
| 44 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    '''simple docstring'''

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
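# Hedged usage sketch for the regression fixtures above (added):
# dataset = RegressionDataset(length=8, seed=42)
# model = RegressionModel(a=1.0, b=0.5)
# model(torch.tensor(dataset[0]["x"]))  # prints the dtypes once, then stays quiet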
 | 229 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 229 | 1 |
'''simple docstring'''
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
 | 354 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
 | 190 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        """simple docstring"""
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass

    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
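# Note (added): the expected id sequences in the tests above are tied to the
# "RUCAIBox/mvp" vocabulary; any other checkpoint would yield different ids.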
| 133 |
def solution(power: int = 1000) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
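# Illustrative check (added): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26,
# so solution(15) == 26.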
| 133 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
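# Hedged usage sketch (added; "harmonai/maestro-150k" is a public checkpoint for
# this pipeline, named here only for illustration):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(audio_length_in_s=4.0).audios[0]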
 | 21 |
'''simple docstring'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
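# Hedged usage sketch via the pipeline factory (added; the model id is a public
# image-captioning checkpoint, given only for illustration):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
# captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")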
| 21 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
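# Hedged invocation sketch (added; the script file name is an assumption):
#   accelerate launch cross_validation.py --num_folds 5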
| 4 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 10 | 0 |
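# The `inputs_to_logits_ratio` property above is just the product of the conv strides,
# i.e. the total downsampling factor of the feature encoder. A standalone check with
# the default strides:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsampling_factor = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling_factor == 320  # 5 * 2**6: one logit frame per 320 input samples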
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
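# A hedged usage sketch for the feature-extraction pipeline above; the task name and
# model checkpoint here are assumptions for illustration, not taken from this file.
# from transformers import pipeline
#
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# features = extractor("Hello world", return_tensors=False)
# print(len(features[0]))  # number of tokens; each entry is a hidden-state vector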
| 154 | """simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | 1 |
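# Quick usage check for the fizz_buzz function above, playing from 1 through 15.
assert fizz_buzz(1, 15).split() == [
    "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
]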
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=1_0000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 149 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 303 | 0 |
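# The lazy-module pattern above defers heavy imports until attribute access. A tiny
# standalone sketch of the same idea via module-level __getattr__ (PEP 562); the
# submodule name `heavy` is an assumption about package layout:
#
# import importlib
#
# def __getattr__(name):
#     if name == "HeavyClass":
#         module = importlib.import_module(".heavy", __name__)
#         return module.HeavyClass
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")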
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 369 |
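# Sanity check for inverse_of_matrix above: the product with the original matrix
# should be (numerically) the identity.
import numpy as np

m = [[4.0, 7.0], [2.0, 6.0]]
inv = inverse_of_matrix(m)
assert np.allclose(np.matmul(m, inv), np.eye(2))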
__UpperCAmelCase = [
(10_00, "M"),
(9_00, "CM"),
(5_00, "D"),
(4_00, "CD"),
(1_00, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 | 0 |
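# Round-trip check for the Roman numeral helpers above.
for value in (1, 4, 9, 14, 40, 90, 400, 1994, 3999):
    assert roman_to_int(int_to_roman(value)) == value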
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
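# The Monte Carlo estimators above converge roughly like 1/sqrt(n); a quick demo of the
# error shrinking as the sample count grows (output varies run to run):
for n in (10, 1_000, 100_000):
    pi_estimator(n)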
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 0 |
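# The recurrence above builds the numerators of the convergents of e from its continued
# fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. A small cross-check with exact fractions:
from fractions import Fraction
from math import e

coefficients = [2, 1, 2, 1, 1, 4, 1, 1, 6]  # leading terms of e's continued fraction
convergent = Fraction(coefficients[-1])
for a in reversed(coefficients[:-1]):
    convergent = a + 1 / convergent
assert abs(float(convergent) - e) < 1e-5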
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as
    the new default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 352 |
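# A hedged usage sketch for the logging helpers above, as they might be called from
# client code (the module path is an assumption):
# from datasets.utils.logging import get_logger, set_verbosity_info, disable_progress_bar
#
# set_verbosity_info()           # raise the library root logger to INFO
# logger = get_logger(__name__)  # child logger that inherits that level
# logger.info("loading dataset shards")
# disable_progress_bar()         # silence tqdm bars globally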
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """simple docstring"""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 82 |
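# Variant of the Kadane scan above that also recovers the best subarray's bounds;
# a minimal sketch for the non-empty case.
def max_subarray_with_bounds(arr):
    best_sum, best_start, best_end = float("-inf"), 0, 0
    curr_sum, curr_start = 0, 0
    for i, num in enumerate(arr):
        if curr_sum <= 0:
            curr_sum, curr_start = num, i
        else:
            curr_sum += num
        if curr_sum > best_sum:
            best_sum, best_start, best_end = curr_sum, curr_start, i
    return best_sum, best_start, best_end


assert max_subarray_with_bounds([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == (6, 3, 6)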
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 162 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 207 | 1 |
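# Usage examples for length_conversion above (unit names or symbols both work):
assert length_conversion(4, "METER", "kilometer") == 0.004
assert length_conversion(3, "kilometer", "meter") == 3000.0
assert length_conversion(2, "km", "m") == 2000.0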
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 87 |
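# Cross-check of the 6k +/- 1 primality test above against naive trial division.
def _is_prime_naive(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, n))


assert all(is_prime(n) == _is_prime_naive(n) for n in range(2, 500))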
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 107 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max(
            (abs(augmented[row2][col]), row2) for row2 in range(row, size)
        )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 328 |
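# Small demo of interpolate() above: fitting the first three terms of n**2 reproduces
# the quadratic exactly, so its next value is predicted correctly.
square = interpolate([1, 4, 9])  # y-values at x = 1, 2, 3
assert square(4) == 16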
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with little data, distributed training needs more epochs to reach the score of 0/1-gpu runs
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 328 | 1 |
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # An IPv4 octet ranges from 0 to 255.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.") | 112 |
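# The standard library offers an equivalent check; a minimal alternative sketch:
from ipaddress import AddressValueError, IPv4Address


def is_ip_v4_address_valid_stdlib(address: str) -> bool:
    try:
        IPv4Address(address)
        return True
    except AddressValueError:
        return False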
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCamelCase__ : List[Any] = {
'''E''': 1_2.7_0,
'''T''': 9.0_6,
'''A''': 8.1_7,
'''O''': 7.5_1,
'''I''': 6.9_7,
'''N''': 6.7_5,
'''S''': 6.3_3,
'''H''': 6.0_9,
'''R''': 5.9_9,
'''D''': 4.2_5,
'''L''': 4.0_3,
'''C''': 2.7_8,
'''U''': 2.7_6,
'''M''': 2.4_1,
'''W''': 2.3_6,
'''F''': 2.2_3,
'''G''': 2.0_2,
'''Y''': 1.9_7,
'''P''': 1.9_3,
'''B''': 1.2_9,
'''V''': 0.9_8,
'''K''': 0.7_7,
'''J''': 0.1_5,
'''X''': 0.1_5,
'''Q''': 0.1_0,
'''Z''': 0.0_7,
}
UpperCamelCase__ : Optional[Any] = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
UpperCamelCase__ : Dict = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 112 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 366 |
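# Quick shape check for gabor_filter_kernel above: an even ksize is bumped to the next
# odd value, so a requested 10x10 kernel comes back as 11x11.
kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert kernel.shape == (11, 11)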
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 83 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
__A : Any = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        -0.25\n'
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
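

if __name__ == "__main__":
    # Hedged sketch: the closed-form binary MCC from the confusion matrix,
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
    # cross-checked against sklearn on a small hand-built example.
    from math import sqrt

    def _binary_mcc(tp, tn, fp, fn):
        denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom

    refs, preds = [1, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1]
    # tp=2, tn=2, fp=1, fn=1  ->  MCC = (4 - 1) / 9 = 1/3
    assert abs(_binary_mcc(2, 2, 1, 1) - matthews_corrcoef(refs, preds)) < 1e-12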
| 154 |
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking one or two steps at a time (the Fibonacci recurrence).

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    >>> climb_stairs(-7)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    AssertionError: number_of_steps needs to be positive integer, your input -7
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
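

if __name__ == "__main__":
    # Hedged aside: the recurrence above is the Fibonacci sequence shifted by one,
    # so climb_stairs(n) == fib(n + 1) with fib(1) = fib(2) = 1.
    def _fib(k: int) -> int:
        a, b = 0, 1
        for _ in range(k):
            a, b = b, a + b
        return a

    assert all(climb_stairs(n) == _fib(n + 1) for n in range(1, 10))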
| 154 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n  note={In the Proceedings of ICLR.},\n  year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n    predictions: list of predictions to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n    \"accuracy\": Accuracy\n    \"f1\": F1 score\n    \"pearson\": Pearson Correlation\n    \"spearmanr\": Spearman Correlation\n    \"matthews_correlation\": Matthew Correlation\nExamples:\n\n    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n\n    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0, 'f1': 1.0}\n\n    >>> glue_metric = datasets.load_metric('glue', 'stsb')\n    >>> references = [0., 1., 2., 3., 4., 5.]\n    >>> predictions = [0., 1., 2., 3., 4., 5.]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n    {'pearson': 1.0, 'spearmanr': 1.0}\n\n    >>> glue_metric = datasets.load_metric('glue', 'cola')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 20 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
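# Note: _LazyModule registers itself in sys.modules, so the heavy torch/vision
# submodules listed in _import_structure are only imported on first attribute
# access; the eager imports under TYPE_CHECKING exist purely for type checkers.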
| 20 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
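

# Hedged usage sketch: the script below is typically invoked as
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers
# (the script filename is illustrative; the two flags match the argparse setup).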
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 145 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BARThez tokenizer, backed by HuggingFace's *tokenizers* library.
    Adapted from CamembertTokenizer and BartTokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # Single sequence: `<s> A </s>`; pair of sequences: `<s> A </s></s> B </s>`
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # BARThez, like RoBERTa, does not use token type ids: always return zeros
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 145 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='''ResNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 33 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url).read())['''releases'''].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, '''r''', encoding='''utf-8''') as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
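

# Hedged inline check of the two relative-import patterns above, applied to a
# string instead of a file on disk:
#   re.findall(r"^\s*import\s+\.(\S+)\s*$", "import .utils", flags=re.MULTILINE) -> ["utils"]
#   re.findall(r"^\s*from\s+\.(\S+)\s+import", "from .pipeline_a import A", flags=re.MULTILINE) -> ["pipeline_a"]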
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F"""{f}.py""" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, '''r''', encoding='''utf-8''') as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('''.''')[0] for imp in imports if not imp.startswith('''.''')]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F"""{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`""" )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '''.''')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There has to be exactly one
    such class defined outside of the `diffusers` package itself.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('''.''')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    F""" {loaded_module}.""" )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, ):
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''')[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = F"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                F""" {', '.join(available_versions + ['main'])}.""" )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('''local''', '''--'''.join(pretrained_model_name_or_path.split('''/''')))
        except EnvironmentError:
            logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, F"""{module_needed}.py""", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('''.py''', ''''''))
| 33 | 1 |
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

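
# Cross-check against the standard library: binomial_coefficient(10, 5) == math.comb(10, 5) == 252.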
print(binomial_coefficient(n=10, r=5))
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    """Configuration class for Falcon models."""

    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
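
# Hedged usage sketch: with the defaults above (hidden_size=4544, num_attention_heads=71,
# alibi=False), FalconConfig().head_dim == 64 and FalconConfig().rotary is True.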
| 263 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 116 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 1_10_08,
'13B': 1_38_24,
'30B': 1_79_20,
'65B': 2_20_16,
'70B': 2_86_72,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
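
# Worked example of the rounding above: for n=4096 (the 7B hidden size) with the
# default multiplier 1 and multiple_of=256, int(8 * 4096 / 3) = 10922, which rounds
# up to the next multiple of 256, i.e. 11008 -- the 7B entry in INTERMEDIATE_SIZE_MAP above.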
def read_json(path):
    with open(path, '''r''') as f:
        return json.load(f)


def write_json(text, path):
    with open(path, '''w''') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, '''tmp''')
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, '''params.json'''))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 1_0_0_0_0.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, '''consolidated.00.pth'''), map_location='''cpu''')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, F"""consolidated.{i:02d}.pth"""), map_location='''cpu''')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {'''weight_map''': {}}
    for layer_i in range(n_layers):
        filename = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
        if model_size == "7B":
            # Unsharded
            state_dict = {
                F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
                F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
                F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
                F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
                F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
                F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
                F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.attention_norm.weight"""
                ].clone(),
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.ffn_norm.weight"""
                ].clone(),
            }
            state_dict[F"""model.layers.{layer_i}.self_attn.q_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim) )
            state_dict[F"""model.layers.{layer_i}.self_attn.k_proj.weight"""] = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
                            num_local_key_value_heads, dims_per_head, dim )
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[F"""model.layers.{layer_i}.self_attn.v_proj.weight"""] = torch.cat(
                [
                    loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
                        num_local_key_value_heads, dims_per_head, dim )
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)

            state_dict[F"""model.layers.{layer_i}.self_attn.o_proj.weight"""] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(num_shards)], dim=1 )
            state_dict[F"""model.layers.{layer_i}.mlp.gate_proj.weight"""] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(num_shards)], dim=0 )
            state_dict[F"""model.layers.{layer_i}.mlp.down_proj.weight"""] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(num_shards)], dim=1 )
            state_dict[F"""model.layers.{layer_i}.mlp.up_proj.weight"""] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(num_shards)], dim=0 )

        state_dict[F"""model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"""] = inv_freq
        for k, v in state_dict.items():
            index_dict['''weight_map'''][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
    if model_size == "7B":
        # Unsharded
        state_dict = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        state_dict = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(num_shards)], dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(num_shards)], dim=0 ),
        }

    for k, v in state_dict.items():
        index_dict['''weight_map'''][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, '''pytorch_model.bin.index.json'''))
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 2_56
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print('''Loading the checkpoint in a Llama model.''')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print('''Saving in the Transformers format.''')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', )
    parser.add_argument(
        '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], )
    parser.add_argument(
        '''--output_dir''', help='''Location to write HF model and tokenizer''', )
    parser.add_argument('''--safe_serialization''', type=bool, help='''Whether or not to save using `safetensors`.''')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, '''tokenizer.model''')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
    main()
| 116 | 1 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    >>> solution(15)
    26
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
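
# Equivalent cross-check: sum(int(digit) for digit in str(2**power)); the
# divmod-style loop above avoids materialising the intermediate string.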
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 7 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
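
# Hedged usage sketch:
#     with hidden_cursor():
#         ...  # render an interactive prompt without a blinking cursor
# show_cursor() runs in the `finally` block, so the cursor is restored even on error.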
| 200 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 77 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME ="__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, """w""") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
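
# Hedged usage sketch: a test can request the directory fixture by name, e.g.
#     def test_dataset_script(dataset_loading_script_dir):
#         builder = datasets.load_dataset_builder(dataset_loading_script_dir)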
| 77 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
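
# Hedged sketch: given a layer with matching shapes, e.g.
#     layer = nn.Linear(4, 4)
#     set_param(layer, torch.zeros(4, 4), torch.zeros(4))
# the asserts fire before assignment on any weight/bias shape mismatch.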
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
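

# Example invocation (all paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin
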
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 298 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
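

# Typical invocation once registered as an `accelerate` subcommand (the config
# path below is a placeholder):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
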
| 298 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
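

# For example (illustrative lines, not taken from the diffusers init):
#   find_backend("    if is_torch_available():")  # -> "torch"
#   find_backend("    if is_torch_available() and is_scipy_available():")  # -> "torch_and_scipy"
#   find_backend("import os")  # -> None
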
def read_init():
    """Read the main __init__ and extract the backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object matching `name`, gated on `backend_name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
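

# For instance, create_dummy_object("UNet2DModel", '["torch"]') fills the
# DUMMY_CLASS template and yields roughly (illustrative output):
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...
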
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files, one per backend combination."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally rewrite them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 124 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 124 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: units number of the flattened layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all model parameters to a pickle file
        model_dic = {
            'num_bp1': self.num_bp1,
            'num_bp2': self.num_bp2,
            'num_bp3': self.num_bp3,
            'conv1': self.conv1,
            'step_conv1': self.step_conv1,
            'size_pooling1': self.size_pooling1,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conv1,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conv1,
            'thre_bp2': self.thre_bp2,
            'thre_bp3': self.thre_bp3,
        }
        with open(save_path, 'wb') as f:
            pickle.dump(model_dic, f)

        print(f"""Model saved: {save_path}""")
    @classmethod
    def read_model(cls, model_path):
        # restore a model instance from a pickle file produced by save_model
        with open(model_path, 'rb') as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        size_p1 = model_dic.get('size_pooling1')
        bp1 = model_dic.get('num_bp1')
        bp2 = model_dic.get('num_bp2')
        bp3 = model_dic.get('num_bp3')
        r_w = model_dic.get('rate_weight')
        r_t = model_dic.get('rate_thre')
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get('w_conv1')
        conv_ins.wkj = model_dic.get('wkj')
        conv_ins.vji = model_dic.get('vji')
        conv_ins.thre_conv1 = model_dic.get('thre_conv1')
        conv_ins.thre_bp2 = model_dic.get('thre_bp2')
        conv_ins.thre_bp3 = model_dic.get('thre_bp3')
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))
    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
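
    # Shape note: for an n x n input, k x k kernels and stride s, convolute
    # produces int((n - k) / s + 1) values per side; e.g. a 28x28 input with
    # 5x5 kernels at stride 1 gives 24x24 feature maps, one per kernel.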
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # flatten a list of matrices into a single 1-D array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # flatten one matrix into a 1 x N row
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # propagate the pooled gradient back to the convolution feature maps
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training
        print('----------------------Start Training-------------------------')
        print((' - - Shape: Train_Data ', np.shape(datas_train)))
        print((' - - Shape: Teach_Data ', np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"""-------------Learning Time {rp}--------------""")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, '+-')
            plt.plot(yplot, 'r--')
            plt.xlabel('Learning Times')
            plt.ylabel('All_mse')
            plt.grid(True, alpha=0.5)
            plt.show()

        print('------------------Training Completed---------------------')
        print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print('-------------------Start Testing-------------------------')
        print((' - - Shape: Test_Data ', np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of the image after the convolution process so we can check it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
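
# A minimal usage sketch (shapes and hyperparameters are illustrative only):
#
#   # for 28x28 inputs, 8 kernels of 5x5 at stride 1 and 2x2 pooling,
#   # the flattened BP input size is 8 * 12 * 12 = 1152
#   cnn = CNN(conv1_get=[5, 8, 1], size_p1=2, bp_num1=1152, bp_num2=30, bp_num3=1)
#   mse = cnn.train(patterns, train_x, train_y, n_repeat=10, error_accuracy=0.5, draw_e=False)
#   preds = cnn.predict(test_x)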
 | 325 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to a binary string with the 0b prefix."""

    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
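

# Examples:
#   decimal_to_binary(0)   -> "0b0"
#   decimal_to_binary(5)   -> "0b101"
#   decimal_to_binary(-2)  -> "-0b10"
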
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
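

# A minimal usage sketch (the checkpoint name is illustrative):
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
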
| 22 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
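

# A minimal usage sketch: building a config and an ONNX export config from it.
#
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs
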
| 22 | 1 |
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('''CPU''', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('''GPU''', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('''Model''', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text('''Loaded Checkpoint''', font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''', font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))

        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
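
# A scene like this is typically rendered with the manim CLI, e.g. (the file
# name and flags are illustrative and may vary by manim version):
#
#   manim -pql stage_2.py Stage2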
| 94 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min_bound, max_bound):
    return (
        clamp(rect[0], min_bound[0], max_bound[0]),
        clamp(rect[1], min_bound[1], max_bound[1]),
        clamp(rect[2], min_bound[0], max_bound[0]),
        clamp(rect[3], min_bound[1], max_bound[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"""progress: {obj['progress']:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 283 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''), id='''references'''
                    ),
                }
            ),
            codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''],
            reference_urls=[
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 366 |
def solution(n: int = 1_0_0_0) -> int:
    """
    Return the largest product a * b * c of a Pythagorean triplet
    (a**2 + b**2 == c**2) whose perimeter a + b + c equals n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
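

# Worked example: for n = 1000 the triplet is (a, b, c) = (200, 375, 425),
# since 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 425**2, so the answer
# is 200 * 375 * 425 = 31875000.
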
if __name__ == "__main__":
print(F"""{solution() = }""")
| 90 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
main()
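

# This script is designed to be launched on several processes at once, e.g.
# (the file name and flag values are illustrative):
#
#   accelerate launch --num_processes 2 test_ops.py
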
| 28 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_instructblip'''] = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Depth-first search that returns vertices in order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Depth-first search on the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict) -> list:
    """Return the strongly connected components of `graph` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
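

# Example: in test_graph_1 the cycle 0 -> 2 -> 1 -> 0 forms one component, so
# strongly_connected_components(test_graph_1) returns [[0, 1, 2], [3], [4]].
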
| 353 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase__ : List[str] = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
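
# Each test below hand-builds the argparse.ArgumentParser that HfArgumentParser
# is expected to generate for one of the dataclasses above, then checks that
# the two parsers agree action-for-action.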
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper: two argparsers are equal if all their actions are equal."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # YAML is a superset of JSON, so parse_yaml_file handles the JSON file too
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 287 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _snake_case ( UpperCamelCase : int = 1000000 , UpperCamelCase : int = 10 ):
UpperCAmelCase : defaultdict = defaultdict(UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase : str = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
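
# The DPR context- and question-encoder tokenizers below are plain
# BertTokenizers that only swap in the pretrained resource maps above; the
# reader tokenizer additionally gains the span-decoding helpers from the mixin.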
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # visit the passages from most to least relevant
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best answer spans for one passage, ranked by `span_score`."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # skip spans that overlap an already-chosen interval
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
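
# Hedged usage sketch (not part of the original module):
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway"],
#       return_tensors="pt",
#   )
# encoded["input_ids"] then has shape (n_passages, sequence_length).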
| 21 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
a : List[Any] = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
a : Any = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
a : Any = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->dict[str, int]:
'''simple docstring'''
a : Optional[int] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def _SCREAMING_SNAKE_CASE ( _lowercase : tuple ) ->str:
'''simple docstring'''
return x[0]
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str:
'''simple docstring'''
a : Tuple = get_letter_count(_lowercase )
a : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_lowercase )
a : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_lowercase )
a : List[Any] = "".join(freq_to_letter[freq] )
a : Any = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_lowercase , reverse=_lowercase )
a : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->int:
'''simple docstring'''
a : int = get_frequency_order(_lowercase )
a : List[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
"""simple docstring"""
from itertools import product
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->list[int]:
'''simple docstring'''
a : Dict = sides_number
a : List[str] = max_face_number * dice_number
a : Optional[int] = [0] * (max_total + 1)
a : Dict = 1
a : Optional[Any] = range(_lowercase , max_face_number + 1 )
for dice_numbers in product(_lowercase , repeat=_lowercase ):
a : Union[str, Any] = sum(_lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def _SCREAMING_SNAKE_CASE ( ) ->float:
'''simple docstring'''
a : str = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a : List[Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a : Optional[Any] = 0
a : Tuple = 9
a : Union[str, Any] = 4 * 9
a : Any = 6
for peter_total in range(_lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a : List[str] = (4**9) * (6**6)
a : List[Any] = peter_wins_count / total_games_number
a : Any = round(_lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79 | 0 |
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 90 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 116 | 0 |
'''simple docstring'''

from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image; positive `level` values increase
    contrast, negative values decrease it."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transformation applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
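
# Sanity check of the factor formula (illustration, not in the original file):
# level=0 gives factor = 259*255 / (255*259) = 1.0, i.e. the identity mapping;
# level=128 gives factor = 259*383 / (255*131) ~= 2.97, a much steeper curve
# around the midpoint value 128.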
| 366 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the allocated resources column-wise across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector minus currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Index control dictionary mapping each process index to its need list."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm, printing a safe execution order."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align and display the input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
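
# Hedged usage sketch (the kwarg name is illustrative; any truthy keyword makes
# `main` print the tables first):
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)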
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all print statements, ignoring those inside comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 109 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 308 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 365 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()

        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = UNetaDModel
SCREAMING_SNAKE_CASE = '''sample'''
@property
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : List[str]=(32, 32) ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = 4
__UpperCAmelCase : Tuple = 3
__UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__UpperCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : int = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Any = self.dummy_input
__UpperCAmelCase : int = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = noise
__UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ )
assert image is not None, "Make sure output is not None"
@slow
    def lowerCamelCase_ ( self : List[Any] ):
        """Compare the CelebA-HQ 256 NCSN++ output against a recorded slice."""
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
    def lowerCamelCase_ ( self : str ):
        """Same slice check against the small FFHQ VE dummy checkpoint."""
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# not required for this model
pass
| 37 | 0 |
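For orientation, the tests above all follow the same load-and-denoise pattern. A minimal runnable sketch of that pattern, assuming the public `diffusers` UNet2DModel API and reusing the dummy checkpoint name from the tests:

import torch
from diffusers import UNet2DModel

# small dummy checkpoint used by the tests above
model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()
# one noisy sample shaped to the model's expected spatial size
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
with torch.no_grad():
    sample = model(noise, timestep=10).sample  # the model's predicted residual/sample
print(sample.shape)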
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'convnextv2'
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=2_2_4 , out_features=None , out_indices=None , **kwargs , ) -> None:
        """ConvNeXt-V2 configuration; defaults follow the tiny variant."""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
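As a usage sketch (this mirrors the upstream `transformers.ConvNextV2Config` that the class above reimplements; the explicit values equal the defaults):

from transformers import ConvNextV2Config, ConvNextV2Model

config = ConvNextV2Config(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768])
model = ConvNextV2Model(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']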
| 175 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
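The lazy structure is invisible to users: importing the class triggers the real module import on first touch. A minimal sketch, assuming the public transformers API:

from transformers import EncoderDecoderModel

# tie a pretrained encoder and decoder together; torch is only imported here
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")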
| 175 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # config attribute names below follow the upstream transformers DPT conversion script
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    # target key paths reconstructed from the upstream conversion script
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 368 |
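Once converted, the weights run through the regular transformers inference path. A sketch of depth estimation (the Intel/dpt-hybrid-midas repo id is an assumption about where the converted weights are published; a local --pytorch_dump_folder_path works the same way):

import torch
import requests
from PIL import Image
from transformers import DPTImageProcessor, DPTForDepthEstimation

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")  # assumed hub id
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (1, H', W') relative depth map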
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a natural number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: smallest D = |P_j - P_k| with both sum and difference pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''') | 306 | 0 |
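The test in is_pentagonal inverts the pentagonal formula: from P(n) = n(3n - 1)/2, solving 3n^2 - n - 2x = 0 for n gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that n is a natural number. A quick sanity check:

assert is_pentagonal(22)      # P(4) = 4 * 11 / 2 = 22
assert not is_pentagonal(23)
print(solution())             # Project Euler 44's known answer is 5482660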
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # hparam attribute names below follow the upstream TAPAS conversion script
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'Task {task} not supported.')
    print(F'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 290 |
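After conversion the checkpoint plugs into the usual table-QA flow. A minimal sketch using an already-published fine-tuned checkpoint instead of a locally converted one:

import pandas as pd
from transformers import TapasTokenizer, TapasForQuestionAnswering

tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")

# TAPAS expects every table cell as a string
table = pd.DataFrame({"City": ["Paris", "London"], "Population": ["2161000", "8982000"]})
inputs = tokenizer(table=table, queries=["Which city has 8982000 inhabitants?"], return_tensors="pt")
outputs = model(**inputs)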
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs until the token budget is hit."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split and copy val/test through unchanged."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open('w').write('\n'.join(packed_src))
        Path(save_path / f"{split}.target").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 90 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Turns each candidate label into an NLI premise/hypothesis pair for zero-shot classification."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
                '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.'
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail'):
                return ind
        return -1

    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`'
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get('multi_class', None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.'
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''')
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 272 |
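End to end, the class above is what backs pipeline("zero-shot-classification"). A minimal sketch with a standard NLI checkpoint:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput",
    candidate_labels=["technology", "sports", "cooking"],
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])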
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Softmax with a tiny additive constant to work around XLA quirks on some platforms."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a padding mask into an additive mask with large negative values at masked positions."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            f'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '''
            f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ), )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            f'''bytes: {bad_attributes}''')
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 272 | 1 |
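A short sketch of the two helpers used most often above: shape_list for mixed static/dynamic shapes and flatten as a torch.flatten stand-in:

import tensorflow as tf

x = tf.zeros((2, 5, 7))
print(shape_list(x))         # [2, 5, 7] -- plain ints where the shape is static
y = flatten(x, start_dim=1)  # mirrors torch.flatten -> shape (2, 35)
print(shape_list(y))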
def selection_sort(collection: list) -> list:
    """Pure-Python selection sort: repeatedly swap the minimum of the unsorted tail into place."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = (collection[least], collection[i])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 277 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 1 |
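The user-visible effect of the structure above: top-level imports resolve through the lazy module, so TensorFlow and Flax are only imported if their classes are actually used. A sketch with the public checkpoint:

from transformers import AutoTokenizer, LongT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/long-t5-tglobal-base")
model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-tglobal-base")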
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration for the vanilla encoder-decoder Time Series Transformer."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 353 |
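A minimal instantiation sketch; the illustrative feature counts must match the data you later feed to the model:

from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

config = TimeSeriesTransformerConfig(
    prediction_length=24,              # forecast horizon
    context_length=48,                 # conditioning window
    num_time_features=2,               # e.g. age + month-of-year
    num_static_categorical_features=1,
    cardinality=[366],                 # one categorical feature with 366 possible values
    embedding_dimension=[4],
)
model = TimeSeriesTransformerModel(config)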
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    """Move one entry of the state dict from key `old` to key `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    # target key paths reconstructed from the upstream DETR-style conversion scripts
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original Table Transformer weights to our object-detection format.
    """
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 215 | 0 |
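After conversion the model runs through the standard object-detection flow. A sketch using the hub id the script pushes toward (assumed to be publicly available):

import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

image = Image.open("example_pdf.png").convert("RGB")  # any page image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# keep detections above a confidence threshold
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
print(results["labels"], results["scores"])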
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    '''Write README/dataset_infos.json combinations and check they parse back.'''
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_write_to_directory(dataset_info, tmp_path):
    '''DatasetInfo should survive a write_to_directory/from_directory round trip.'''
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 4_2}], download_checksums={}, download_size=1_3_3_7, post_processing_size=4_4_2, dataset_size=1_2_3_4, size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase__ ( ):
'''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def lowerCAmelCase__ ( dataset_infos_dict , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md""" ) )
| 294 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''informer'''
    attribute_map = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
    def __init__( self , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = None , scaling : Optional[Union[str, bool]] = "mean" , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , num_time_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , d_model : int = 6_4 , encoder_ffn_dim : int = 3_2 , decoder_ffn_dim : int = 3_2 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , encoder_layers : int = 2 , decoder_layers : int = 2 , is_encoder_decoder : bool = True , activation_function : str = "gelu" , dropout : float = 0.05 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 1_0_0 , init_std : float = 0.02 , use_cache : bool = True , attention_type : str = "prob" , sampling_factor : int = 5 , distil : bool = True , **kwargs , ):
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
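# Quick illustration (a sketch, not from the original file): with the defaults above,
#     cfg = UpperCAmelCase(prediction_length=24)
# gives cfg.context_length == 24 (context_length falls back to prediction_length)
# and cfg.cardinality == [0], since no static categorical features are configured.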
| 121 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[Any] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __snake_case (PretrainedConfig ):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=5_0267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                """The config can simply be saved and uploaded again to be fixed.""" )
| 159 | 0 |
'''simple docstring'''
from manim import *
class lowercase__ ( Scene ):
    def construct( self ):
        '''simple docstring'''
        mem = Rectangle(height=0.5 ,width=0.5 )
        fill = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
        cpu_left_col = VGroup(*cpu_left_col_base ).arrange(UP ,buff=0 )
        cpu_right_col = VGroup(*cpu_right_col_base ).arrange(UP ,buff=0 )
        cpu_rects = VGroup(cpu_left_col ,cpu_right_col ).arrange(RIGHT ,buff=0 )
        cpu_text = Text('CPU' ,font_size=24 )
        cpu = Group(cpu_rects ,cpu_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(cpu )
        gpu_base = [mem.copy() for i in range(1 )]
        gpu_rect = VGroup(*gpu_base ).arrange(UP ,buff=0 )
        gpu_text = Text('GPU' ,font_size=24 )
        gpu = Group(gpu_rect ,gpu_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        gpu.align_to(cpu ,DOWN )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(gpu )
        model_base = [mem.copy() for i in range(6 )]
        model_rect = VGroup(*model_base ).arrange(RIGHT ,buff=0 )
        model_text = Text('Model' ,font_size=24 )
        model = Group(model_rect ,model_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(cpu_left_col ,run_time=1 ) ,Create(cpu_right_col ,run_time=1 ) ,Create(gpu_rect ,run_time=1 ) ,)
        step_a = MarkupText(
            F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' ,font_size=24 ,)
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a ,run_time=2.5 ) ,Write(key_text ) ,Write(key ) )
        self.add(model )
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base ):
            cpu_target = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0.0 ).set_fill(YELLOW ,opacity=0.7 )
            cpu_target.move_to(rect )
            cpu_target.generate_target()
            cpu_target.target.height = 0.4_6 / 4
            cpu_target.target.width = 0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=UP )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target ,direction=UP ,buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=RIGHT ,buff=0.0 )
            cpu_targs.append(cpu_target )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(YELLOW ) )
            second_animations.append(MoveToTarget(cpu_target ,run_time=1.5 ) )
        self.play(*first_animations )
        self.play(*second_animations )
        self.wait()
| 83 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowercase_ :
'''simple docstring'''
    def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) ->None:
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation ( self ) ->list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources ( self ) ->list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need ( self ) ->list[list[int]]:
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager ( self ) ->dict[int, list[int]]:
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}
    def main ( self , **kwargs : Any ) ->None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data ( self ) ->None:
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
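# Minimal driver sketch (an illustration, not part of the original module):
#     lowercase_(test_claim_vector, test_allocated_res_table,
#                test_maximum_claim_table).main(describe=True)
# prints both tables, then repeatedly executes any process whose needs fit the
# available resources until the system is declared safe or unsafe.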
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
'''simple docstring'''
from math import factorial
def solution( n : int = 2_0 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 338 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class UpperCamelCase_ ( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None

    def init_retrieval( self , distributed_port ):
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def _is_main( self ):
        return dist.get_rank(group=self.process_group ) == 0

    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor

    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
        return ifname

    def retrieve( self , question_hidden_states , n_docs ) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
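# Usage sketch (hedged; the tokenizers, index and port below are illustrative only):
#     retriever = UpperCamelCase_(config, question_encoder_tokenizer, generator_tokenizer, index=index)
#     retriever.init_retrieval(distributed_port=29500)  # gloo group set up; index loaded on the main worker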
| 338 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """FlavaImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images : Optional[ImageInput] = None , text : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = False , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_image_mask : Optional[bool] = None , return_codebook_pixels : Optional[bool] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
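# Usage sketch (hedged; assumes the public "facebook/flava-full" checkpoint and a PIL image):
#     processor = lowerCamelCase__.from_pretrained("facebook/flava-full")
#     inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")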
| 115 |
"""simple docstring"""
from __future__ import annotations
class Node :
    def __init__( self , data : int ) -> None:
        self.data = data
        self.left : Node | None = None
        self.right : Node | None = None
def display ( tree ):  # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree ( tree ):
    return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree ( tree ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
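# For example, a root with exactly two leaf children is full, while a node with a
# single child is not:
#     root = Node(1); root.left = Node(2); root.right = Node(3)
#     assert is_full_binary_tree(root)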
def main ( ):  # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
| 268 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
        '''simple docstring'''
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config( self ):
        '''simple docstring'''
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings( self ):
        '''simple docstring'''
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest ( unittest.TestCase ):
    @slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )

    @slow
    def test_xglm_sample( self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('Today is a nice day and' , return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )

    @slow
    def test_batch_generation( self ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 354 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'

    def _setup_pt_ckpt( self , model_path ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )

    def _setup_tf_ckpt( self , model_path ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )

    def test_framework_provided( self ):
        '''simple docstring'''
        mock_framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )

    def test_checkpoint_provided( self ):
        '''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
        self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
        self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )

    def test_from_environment( self ):
        '''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
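# The resolution order exercised above: an explicit framework argument wins, then the
# contents of a local checkpoint, then whichever of PyTorch/TensorFlow is importable
# (with PyTorch preferred when both are available).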
| 0 | 0 |
import math
def sieve (n ):
    """simple docstring"""
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start ,end + 1 ,start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end ,n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t ,high + 1 ,each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end ,n )
    return prime
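# Quick check: sieve(30) returns the primes up to 30,
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].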
print(sieve(10**6))
| 127 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( Trainer ):
    """simple docstring"""

    def __init__( self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_2_8  # default number of calibration samples
    def get_calib_dataloader( self , calib_dataset=None ):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description='''Calibration''' )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )
    def calibrate( self , calib_dataset=None ):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info('''***** Running calibration *****''' )
        logger.info(F'''  Num examples = {self.calib_num}''' )
        logger.info(F'''  Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
def a_ ( self , __snake_case="./" ):
snake_case = self.eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = next(iter(__snake_case ) )
# saving device - to make it consistent
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case = tuple(v.to(__snake_case ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case = True
snake_case = self.model.to(__snake_case )
model.eval()
model.float()
snake_case = model.module if hasattr(__snake_case , '''module''' ) else model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args )
snake_case = os.path.join(__snake_case , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
snake_case = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__snake_case , __snake_case , __snake_case , export_params=__snake_case , opset_version=1_3 , do_constant_folding=__snake_case , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__snake_case , )
logger.info('''onnx export finished''' )
| 127 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop( self ):
        """simple docstring"""
        debug_launcher(self.test_metrics.main , num_processes=1 )

    @require_cpu
    def test_metric_cpu_multi( self ):
        """simple docstring"""
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def test_metric_gpu( self ):
        """simple docstring"""
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi( self ):
        """simple docstring"""
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 365 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def test_encode_decode( self ):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_tokens = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences )["input_ids"]
        self.assertListEqual(target_tokens , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(input_sentences , decoded_tokens )
    def test_padding( self , max_length=6 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_encodings_from_xnli_dataset( self ):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )
        sample_data = next(iter(ds ) )["premise"]  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )

    def test_pretrained_model_lists( self ):
        """simple docstring"""
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 133 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ : int = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card (model_card_dir , src_lang , tgt_lang ) -> None:
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    readme = SCREAMING_SNAKE_CASE  # the model card text built above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several consecutive learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
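# Usage sketch (an addition, not part of the class above; the checkpoint name is
# an assumption): mapping one placeholder to several learned vectors.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("A photo of <cat-toy>", vector_shuffle=True)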
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
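    # Cross-check (an addition, not part of the original solution): the standard
    # library counts the same Sundays falling on the first of a month, 1901-2000.
    import datetime

    assert solution() == sum(
        datetime.date(year, month, 1).weekday() == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )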
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _snake_case ( snake_case__ : "RagExampleArguments" , snake_case__ : "ProcessingArguments" , snake_case__ : "IndexHnswArguments" , ):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
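# Follow-up sketch (an addition; `question_embedding` is hypothetical and would come
# from a DPRQuestionEncoder): reloading the artifacts written by `main` for retrieval.
#
#   from datasets import load_from_disk
#   dataset = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
#   dataset.load_faiss_index(
#       "embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
#   )
#   scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)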
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
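# Illustrative note (an addition): with _LazyModule installed in sys.modules,
# `import transformers.models.ernie` stays cheap; an attribute lookup such as
# `ErnieModel` is what triggers the underlying `modeling_ernie` import (and
# therefore torch) on first access.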
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
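# Standalone usage sketch (an addition, mirroring the slow integration test above):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": ~0.88, "answer": "2"}, {"score": ~0.30, "answer": "1"}]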
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
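# For comparison (an addition, and an assumption about the public API): the same
# pandas-style `converters` can also be passed through `load_dataset`, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files=path, converters={"int_list": lambda x: [int(i) for i in x.split()]})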
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
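# Quick demonstration of the key renaming (an addition, not in the original):
# "name.<digit>" segments are flattened so keys line up with Flax module names, e.g.
#   rename_key("down_blocks.0.attentions.1.proj") == "down_blocks_0.attentions_1.proj"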
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of ``graph`` or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
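# Negative example (an addition): with a cycle, the vertices on the cycle never
# reach indegree 0, so cnt != len(graph) and "Cycle exists" is printed instead
# of an ordering, e.g. topological_sort({0: [1], 1: [0]}).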
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
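# Standalone sketch (an addition, outside the test harness; the random tensor is a
# stand-in for a real consistency model):
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)
#       residual = torch.randn_like(scaled)
#       sample = scheduler.step(residual, t, sample).prev_sample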
"""Jaro-Winkler similarity: https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance"""


def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
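    # Worked example (an addition): "martha" vs "marhta" has 6 matching characters
    # and 1 transposition, so jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; the common
    # prefix "mar" lifts it to 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.
    assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-4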
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
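# Quick usage sketch (an addition) matching the integration test above:
#
#   tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
#   summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))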
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
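    # Example (an addition): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so the classic sample DNI "12345678Z" validates.
    assert is_spain_national_id("12345678Z")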
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
) -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order scheduler (Algorithm 2 in Karras et al., 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 71 |
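
The Karras schedule at the top of the scheduler above is easy to sanity-check in isolation. The sketch below recomputes it with plain numpy; the standalone function name and the example sigma range are mine, not part of the snippet.

import numpy as np

def karras_sigmas(sigma_min, sigma_max, num_steps, rho=7.0):
    # Interpolate linearly in sigma**(1/rho) space, then map back (Karras et al., 2022).
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas = karras_sigmas(0.02, 80.0, num_steps=10)
assert np.isclose(sigmas[0], 80.0) and np.isclose(sigmas[-1], 0.02)  # descends from sigma_max to sigma_min
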
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                _ = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                _ = FeaturesManager.determine_framework(self.test_model)
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ (UpperCamelCase__ : int ):
_UpperCAmelCase : Any = 2
_UpperCAmelCase : List[Any] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(UpperCamelCase__ )
if n > 1:
factors.append(UpperCamelCase__ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
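
A few worked cases for prime_factors, matching the trial-division loop above:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2**3 * 3**2 * 5
assert prime_factors(97) == [97]                 # a prime is its own factorization
assert prime_factors(1) == []                    # the loop never runs and 1 is not appended
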
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=a ):
'''simple docstring'''
a__ =['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *A , **A ) -> int:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 68 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # A RemBERT sequence (pair) has the format: [CLS] A [SEP] (B [SEP])
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 51 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Two-ended recursive linear search: return the index of key, or -1 if absent."""
    right = right or len(list_data) - 1  # note: an explicit right=0 is treated as "unset"
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 51 | 1 |
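
A short demonstration of the two-ended search above, including the default-argument caveat:

data = [1, 3, 5, 7, 9, 11]
assert search(data, 11) == 5   # found at the right end on the first call
assert search(data, 4) == -1   # absent keys fall through to the left > right base case

# Caveat: because `right = right or len(list_data) - 1` uses `or`, an explicit
# right=0 is indistinguishable from "unset" and silently expands to len - 1.
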
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Read from a Spark DataFrame into a Hugging Face Dataset."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 243 |
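
A usage sketch for the reader above, assuming a local SparkSession; the toy schema and the direct use of the reader (rather than a higher-level loading entry point) are mine:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], schema=["id", "text"])

reader = SparkDatasetReader(df, streaming=False)  # streaming=True would yield an iterable dataset
dataset = reader.read()
print(dataset[0])  # {'id': 1, 'text': 'a'}
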
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Runs inside a Ray actor and hosts one RagRetriever replica."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 243 | 1 |
def prime_sieve_eratosthenes(num: int) -> list:
    """Sieve of Eratosthenes: return all primes up to num (inclusive)."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # every multiple of p starting at p*p is composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 345 |
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 345 | 1 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between two points, computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between two points, computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
| 26 |
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # out of order: swap and step back to re-check the previous pair
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 26 | 1 |
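
Gnome sort walks the list with a single index and steps back after every swap, so it is O(n^2) in the worst case but O(n) on already-sorted input. A few checks against the implementation above:

assert gnome_sort([5, 3, 4, 1]) == [1, 3, 4, 5]
assert gnome_sort([]) == []                 # handled by the len <= 1 guard
assert gnome_sort([2, 2, 1]) == [1, 2, 2]   # duplicates are fine; `<=` avoids swapping equal keys
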
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's installed version (or an already-parsed Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Check the installed torch version against `version` using `operation` (e.g. ">=")."""
    return compare_versions(torch_version, operation, version)
| 22 |
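
Typical call sites gate a feature on the installed release; a hedged sketch (the version strings are illustrative):

if is_torch_version(">=", "1.12.0"):
    print("new enough for this code path")
elif compare_versions("torch", "<", "1.6.0"):
    raise RuntimeError("torch is too old")
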
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 186 | 0 |
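
The script is normally driven by argparse, but the conversion function can equally be called directly; all paths below are placeholders:

convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path="/path/to/fairseq/xlm-roberta-xl",  # directory containing the fairseq checkpoint
    pytorch_dump_folder_path="/path/to/output",
    classification_head=False,  # True only for an MNLI-finetuned checkpoint
)
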
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 11 |
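
A hedged usage sketch for the tool above; the image path is a placeholder:

from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("/path/to/photo.jpg")
print(tool(image, "What is in the picture?"))  # PipelineTool instances are callable: encode -> forward -> decode
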
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 1 |
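
Outside the distributed test, the accumulator follows a plain accumulate-then-apply loop. A minimal single-replica sketch; the toy model and random batches are mine:

import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 4))
accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=100, num_warmup_steps=10)

for _ in range(8):
    x = tf.random.normal((2, 4))
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(model(x) ** 2)
    accumulator(tape.gradient(loss, model.trainable_variables))
    if accumulator.step == 4:  # apply once 4 micro-batch gradients have accumulated
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()
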
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 189 |
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses an RMS-style layer norm which only scales and doesn't shift;
        # the variance is computed in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # GELU, tanh approximation (as used in Google BERT / GPT).
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """Feature-wise linear modulation: predict a scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 189 | 1 |
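
A hedged smoke test for the decoder above; the small hyperparameters are mine, chosen so it runs quickly, and the shapes follow the forward() contract (continuous inputs of width input_dims, one noise level per batch element):

import torch

decoder = T5FilmDecoder(
    input_dims=128, targets_length=256, d_model=64, num_layers=1, num_heads=2, d_kv=32, d_ff=128
)
tokens = torch.randn(2, 256, 128)   # (batch, targets_length, input_dims)
noise_time = torch.rand(2)          # in [0, 1)
encoding = torch.randn(2, 10, 64)   # one encoder stream with its mask
mask = torch.ones(2, 10)

spec = decoder(
    encodings_and_masks=[(encoding, mask)],
    decoder_input_tokens=tokens,
    decoder_noise_time=noise_time,
)
print(spec.shape)  # expected: torch.Size([2, 256, 128])
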
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a_ ( _lowercase , _lowercase , _lowercase ):
_UpperCamelCase : List[Any] = AlbertConfig.from_json_file(A__ )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCamelCase : Tuple = AlbertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(A__ , A__ , A__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
UpperCamelCase_ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 358 |
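
The argparse block drives the converter from the command line; it can equally be called directly (paths below are placeholders):

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/albert/model.ckpt-best",
    albert_config_file="/path/to/albert_config.json",
    pytorch_dump_path="/path/to/pytorch_model.bin",
)
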
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( _lowerCAmelCase ):
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''BlipImageProcessor'''
UpperCamelCase = '''AutoTokenizer'''
def __init__( self : List[str], lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : Optional[int] ) -> int:
'''simple docstring'''
_UpperCamelCase : Any = False
super().__init__(lowerCAmelCase__, lowerCAmelCase__ )
_UpperCamelCase : Tuple = self.image_processor
def __call__( self : str, lowerCAmelCase__ : ImageInput = None, lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, lowerCAmelCase__ : bool = True, lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False, lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None, lowerCAmelCase__ : Optional[int] = None, lowerCAmelCase__ : int = 0, lowerCAmelCase__ : Optional[int] = None, lowerCAmelCase__ : Optional[bool] = None, lowerCAmelCase__ : bool = False, lowerCAmelCase__ : bool = False, lowerCAmelCase__ : bool = False, lowerCAmelCase__ : bool = False, lowerCAmelCase__ : bool = False, lowerCAmelCase__ : bool = True, lowerCAmelCase__ : Optional[Union[str, TensorType]] = None, **lowerCAmelCase__ : Optional[Any], ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_UpperCamelCase : int = self.tokenizer
_UpperCamelCase : List[str] = self.tokenizer(
text=lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=lowerCAmelCase__, stride=lowerCAmelCase__, pad_to_multiple_of=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, return_overflowing_tokens=lowerCAmelCase__, return_special_tokens_mask=lowerCAmelCase__, return_offsets_mapping=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_length=lowerCAmelCase__, verbose=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__, )
return text_encoding
# add pixel_values
_UpperCamelCase : List[str] = self.image_processor(lowerCAmelCase__, return_tensors=lowerCAmelCase__ )
if text is not None:
_UpperCamelCase : Any = self.tokenizer(
text=lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=lowerCAmelCase__, stride=lowerCAmelCase__, pad_to_multiple_of=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, return_overflowing_tokens=lowerCAmelCase__, return_special_tokens_mask=lowerCAmelCase__, return_offsets_mapping=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_length=lowerCAmelCase__, verbose=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__, )
else:
_UpperCamelCase : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase__ )
return encoding_image_processor
def snake_case ( self : List[Any], *lowerCAmelCase__ : List[str], **lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__, **lowerCAmelCase__ )
def snake_case ( self : List[Any], *lowerCAmelCase__ : Dict, **lowerCAmelCase__ : Any ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__, **lowerCAmelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def snake_case ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase : List[str] = self.tokenizer.model_input_names
_UpperCamelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 128 | 0 |
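
A hedged usage sketch for the processor above; the checkpoint name and image URL are illustrative, not taken from this file:

import requests
from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text="Question: how many cats are there? Answer:", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs (input_ids, attention_mask)
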
from math import sqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test for non-negative integers."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: primes from 2 up to n (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filter out the actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Primes from 2 up to n, found by trial division on each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterate over all numbers between 2 and N
    # and append each prime to 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of 'number' as an ascending list."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's conjecture: every even number > 2 is the sum of two primes."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for the while-loops.
    i = 0
    j = None

    # exit variable, for breaking out of the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must be from type int and positive"

    return number1
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE_: str = 1 # actual answer that will be return.
# for kgV (x,1)
    if numbera > 1 and numberb > 1:
# builds the prime factorization of 'number1' and 'number2'
SCREAMING_SNAKE_CASE_: Optional[int] = prime_factorization(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = prime_factorization(_UpperCAmelCase )
    elif numbera == 1 or numberb == 1:
SCREAMING_SNAKE_CASE_: int = []
SCREAMING_SNAKE_CASE_: Any = []
SCREAMING_SNAKE_CASE_: str = max(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: str = 0
SCREAMING_SNAKE_CASE_: str = 0
SCREAMING_SNAKE_CASE_: Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
            if n in prime_fac_b:
                SCREAMING_SNAKE_CASE_: Any = prime_fac_a.count(_UpperCAmelCase )
                SCREAMING_SNAKE_CASE_: Union[str, Any] = prime_fac_b.count(_UpperCAmelCase )
for _ in range(max(_UpperCAmelCase , _UpperCAmelCase ) ):
ans *= n
else:
SCREAMING_SNAKE_CASE_: int = prime_fac_a.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            SCREAMING_SNAKE_CASE_: Union[str, Any] = prime_fac_b.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
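# A compact cross-check for the least-common-multiple routine above, using the
# standard identity lcm(a, b) * gcd(a, b) == a * b (math.gcd is in the standard
# library; the helper name is illustrative):
import math

def _lcm_sketch(a: int, b: int) -> int:
    # safe for positive integers; integer division keeps the result exact
    return a * b // math.gcd(a, b)
# e.g. _lcm_sketch(24, 36) == 72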
def A_ ( _UpperCAmelCase ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'number' must been a positive int"
SCREAMING_SNAKE_CASE_: List[str] = 0
SCREAMING_SNAKE_CASE_: Any = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCAmelCase ):
ans += 1
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and is_prime(
_UpperCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def A_ ( p_number_a , p_number_b ):
assert (
        is_prime(_UpperCAmelCase ) and is_prime(_UpperCAmelCase ) and (p_number_a < p_number_b)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
SCREAMING_SNAKE_CASE_: List[str] = p_number_a + 1 # jump to the next number
SCREAMING_SNAKE_CASE_: int = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
    while number < p_number_b:
ans.append(_UpperCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and ans[0] != p_number_a
        and ans[len(_UpperCAmelCase ) - 1] != p_number_b
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
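# Illustrative expected behavior of the range search above: the primes strictly
# between 3 and 13 are [5, 7, 11] (both boundary primes are excluded).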
def get_divisors ( n ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
SCREAMING_SNAKE_CASE_: Optional[int] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_UpperCAmelCase )
# precondition
    assert ans[0] == 1 and ans[len(_UpperCAmelCase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def A_ ( _UpperCAmelCase ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
SCREAMING_SNAKE_CASE_: Optional[Any] = get_divisors(_UpperCAmelCase )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (divisors[0] == 1)
and (divisors[len(_UpperCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
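# Worked example for the check above: 6 is perfect, since get_divisors(6) is
# [1, 2, 3, 6] and the proper divisors sum to 1 + 2 + 3 == 6.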
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
SCREAMING_SNAKE_CASE_: Union[str, Any] = gcd(abs(_UpperCAmelCase ) , abs(_UpperCAmelCase ) )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A_ ( _UpperCAmelCase ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
SCREAMING_SNAKE_CASE_: Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def A_ ( _UpperCAmelCase ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Any = 1 # this will be return
for _ in range(n - 1 ):
SCREAMING_SNAKE_CASE_: Optional[int] = ans
ans += fiba
SCREAMING_SNAKE_CASE_: Union[str, Any] = tmp
return ans
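# A minimal un-mangled sketch of the iterative Fibonacci loop above
# (illustrative helper name; the mangled assignment targets obscure the
# original variable names):
def _fib_sketch(n: int) -> int:
    prev, cur = 0, 1
    for _ in range(n - 1):
        # slide the window: cur becomes the sum of the previous pair
        prev, cur = cur, prev + cur
    return cur
# e.g. _fib_sketch(7) == 13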
| 13 | """simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , ):
"""simple docstring"""
A__ = bnb_quantization_config.load_in_abit
A__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
A__ = []
# custom device map
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(device_map.keys() ) > 1:
A__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(UpperCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCamelCase__ )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCamelCase__ )
# compatibility with peft
A__ = load_in_abit
A__ = load_in_abit
A__ = get_parameter_device(UpperCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
A__ = replace_with_bnb_layers(UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
# convert param to the right dtype
A__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ = name.replace('.weight' , '' ).replace('.bias' , '' )
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase__ ):
param.to(UpperCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A__ = replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
A__ = get_quantized_model_device_map(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_memory=UpperCamelCase__ , no_split_module_classes=UpperCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
        offload_state_dict = True
A__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase__ , offload_state_dict=UpperCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCamelCase__ , device_map=UpperCamelCase__ , offload_dir=UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ = {}
A__ = special_dtypes
A__ = no_split_module_classes
A__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ = get_balanced_memory(
UpperCamelCase__ , low_zero=(device_map == 'balanced_low_0') , max_memory=UpperCamelCase__ , **UpperCamelCase__ , )
A__ = max_memory
A__ = infer_auto_device_map(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# check if don't have any quantized module on the cpu
A__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
"""simple docstring"""
if modules_to_not_convert is None:
        modules_to_not_convert = []
    A__ , has_been_replaced = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ):
"""simple docstring"""
A__ = False
for name, module in model.named_children():
if current_key_name is None:
            current_key_name = []
current_key_name.append(UpperCamelCase__ )
if isinstance(UpperCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ = '.'.join(UpperCamelCase__ )
A__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=UpperCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
A__ = module.weight.data
if module.bias is not None:
A__ = module.bias.data
bnb_module.requires_grad_(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = True
if len(list(module.children() ) ) > 0:
            A__ , _has_been_replaced = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
with init_empty_weights():
        A__ = deepcopy(UpperCamelCase__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
A__ = find_tied_parameters(UpperCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ = sum(UpperCamelCase__ , [] )
A__ = len(UpperCamelCase__ ) > 0
# Check if it is a base model
A__ = False
if hasattr(UpperCamelCase__ , 'base_model_prefix' ):
A__ = not hasattr(UpperCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ = list(model.named_children() )
A__ = [list_modules[-1][0]]
# add last module together with tied weights
A__ = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
A__ = list(set(UpperCamelCase__ ) ) + list(UpperCamelCase__ )
# remove ".weight" from the keys
A__ = ['.weight', '.bias']
A__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ = name.replace(UpperCamelCase__ , '' )
filtered_module_names.append(UpperCamelCase__ )
return filtered_module_names
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
return True
return False
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return next(parameter.parameters() ).device
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , 0 , dtype=UpperCamelCase__ , value=UpperCamelCase__ )
A__ = param_name
A__ = model
if "." in tensor_name:
A__ = tensor_name.split('.' )
for split in splits[:-1]:
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
A__ = new_module
A__ = splits[-1]
# offload weights
A__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , UpperCamelCase__ , index=UpperCamelCase__ , )
else:
offload_weight(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
offload_weight(UpperCamelCase__ , param_name.replace('weight' , 'SCB' ) , UpperCamelCase__ , index=UpperCamelCase__ )
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , 'meta' , dtype=UpperCamelCase__ , value=torch.empty(*param.size() ) )
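# A hedged usage sketch for the quantization entry point above. The public
# accelerate API (`BnbQuantizationConfig`, `load_and_quantize_model`,
# `init_empty_weights`) exists, but the model class and paths below are
# illustrative placeholders:
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel(config)  # hypothetical model construction
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/checkpoint",  # placeholder
#       device_map="auto",
#   )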
| 221 | 0 |
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=DummyObject ):
A__ : Any = ['onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
requires_backends(cls , ['onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['onnx'] )
| 197 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a__ ( ChunkPipeline ):
def __init__( self , **UpperCAmelCase ) -> List[str]:
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(UpperCAmelCase )
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Tuple:
if "text_queries" in kwargs:
__a = kwargs.pop('text_queries' )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
__a = {'image': image, 'candidate_labels': candidate_labels}
else:
__a = image
__a = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> List[str]:
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
return {}, {}, postprocess_params
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Union[str, Any]:
__a = load_image(inputs['image'] )
__a = inputs['candidate_labels']
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__a = candidate_labels.split(',' )
__a = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
__a = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
__a = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
__a = model_inputs.pop('target_size' )
__a = model_inputs.pop('candidate_label' )
__a = model_inputs.pop('is_last' )
__a = self.model(**UpperCAmelCase )
__a = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=0.1 , UpperCAmelCase=None ) -> Tuple:
__a = []
for model_output in model_outputs:
__a = model_output['candidate_label']
__a = BaseModelOutput(UpperCAmelCase )
__a = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
__a = outputs['scores'][index].item()
__a = self._get_bounding_box(outputs['boxes'][index][0] )
__a = {'score': score, 'label': label, 'box': box}
results.append(UpperCAmelCase )
        __a = sorted(UpperCAmelCase , key=lambda x : x["score"] , reverse=UpperCAmelCase )
if top_k:
__a = results[:top_k]
return results
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
__a = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
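# A hedged usage sketch for the pipeline above. The task string matches this
# class; the OWL-ViT checkpoint is one known zero-shot detector, chosen here
# purely as an illustration:
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cat_photo.jpg", candidate_labels=["cat", "remote control"])
#   # -> [{"score": ..., "label": "cat",
#   #      "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]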
| 197 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
__lowerCAmelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertGenerationTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : int ) -> List[Any]:
super().setUp()
a = BertGenerationTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
a = "<s>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__lowerCamelCase ) , 10_02 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __UpperCAmelCase ( self : List[str] ) -> int:
a = BertGenerationTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2_85, 46, 10, 1_70, 3_82] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCAmelCase ( self : Dict ) -> int:
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = "Hello World!"
a = [1_85_36, 22_60, 1_01]
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
a = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@require_torch
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = " ".join(__lowerCamelCase )
a = self.big_tokenizer.encode_plus(__lowerCamelCase , return_tensors="pt" , return_token_type_ids=__lowerCamelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__lowerCamelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCamelCase )
model(**__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
# fmt: off
a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
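# A hedged usage sketch for the tokenizer under test (the checkpoint name is
# taken from the slow tests above; the expected ids come from the
# "Hello World!" test):
#
#   from transformers import BertGenerationTokenizer
#
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   tok.encode("Hello World!")  # expected: [18536, 2260, 101]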
| 107 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6_021e-19 # units = C
def __A (conductivity: float , electron_conc: float , mobility: float , ) ->tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
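# Worked example of the sigma = n * q * mu identity the solver above inverts:
# for electron_conc = 1e18 m^-3 and mobility = 0.14 m^2/(V.s),
# conductivity = 1e18 * 1.6021e-19 * 0.14, i.e. about 2.24e-2 S/m, so calling
# the function with conductivity=0 and those two values should return
# ("conductivity", ~0.0224).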
| 293 | 0 |
"""simple docstring"""
def alternative_string_arrange ( first_str : str ,second_str : str ) ->str:
    '''simple docstring'''
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
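    # expected output: AXBYZ (characters interleaved; the leftover "Z" is appended)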
| 291 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
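# A hedged sketch of what the lazy indirection above buys: attribute access on
# the package triggers the real submodule import only at that point, e.g.
#
#   from transformers import onnx      # cheap: no heavy submodule imports yet
#   cfg_cls = onnx.OnnxConfig          # first access imports .config here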
| 291 | 1 |