| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # no safety_checker override here, so the safety checker stays enabled
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - image_slice).max() < 1e-2
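

# A minimal sketch (illustrative shapes) of the replicate/shard pattern used in
# the tests above: `shard` splits the leading batch axis across local devices so
# the pmap-compiled pipeline sees one slice per device, while `replicate` copies
# the params pytree once per device.
import jax
import numpy as np
from flax.training.common_utils import shard

n_devices = jax.local_device_count()
token_ids = np.zeros((n_devices * 2, 77), dtype=np.int32)  # e.g. a batch of prompt ids
sharded = shard(token_ids)
assert sharded.shape == (n_devices, 2, 77)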
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # the "dataset_name" field is deprecated, but kept in the `asdict` output for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
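

# Hedged usage sketch of the round trip exercised above: `_to_yaml_list` /
# `_from_yaml_list` are private datasets APIs, used here exactly as in the test.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
assert reloaded["train"].num_examples == 42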
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
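

# Illustrative record (hypothetical data; field names taken from the loop
# above): each DPR record yields one question line and one tab-joined line of
# positive-context titles.
dpr_record = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}]}
question = dpr_record["question"]                                       # evaluation_set line
titles = [context["title"] for context in dpr_record["positive_ctxs"]]  # gold_data_path line
assert (question, "\t".join(titles)) == ("who wrote hamlet", "Hamlet")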
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """    \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
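

# A self-contained sketch (not transformers code) of the same lazy-import idea,
# using PEP 562: a module-level __getattr__ imports the submodule only on first
# attribute access, keeping the initial `import package` cheap. The mapping
# below is hypothetical.
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    for module_name, exported in _lazy_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)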
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its saved state disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
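

# Hedged usage sketch (downloads the checkpoint on first run): encoding a pair
# of sequences shows the layout built above, [CLS] A [SEP] B [SEP], with
# token_type_ids of 0s for the first segment and 1s for the second.
from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("first sequence", "second sequence")
assert encoded["token_type_ids"][0] == 0 and encoded["token_type_ids"][-1] == 1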
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    # `locals()` packs the keyword arguments into the query string, e.g. ?q=Chicago&appid=...
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# The concrete model name was obfuscated in the source; the defaults below
# (224x224, bicubic resize, center crop, ImageNet mean/std) match several
# ViT-style processors in transformers.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
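

# Hedged usage sketch of the processor defined above: a single HWC uint8 image
# comes back as a 1x3x224x224 batch after the default resize + center crop +
# rescale + normalize chain.
import numpy as np

image_processor = ImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = image_processor(image, return_tensors="np")
assert batch["pixel_values"].shape == (1, 3, 224, 224)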
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
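

# Worked example from the Problem 22 statement: COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53 and, at position 938 in the sorted list,
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714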
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_previous_timestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
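

# Example invocation, assembled from the argparse defaults and help strings
# above (the script file name and input paths are placeholders):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json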
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
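

# The float cube root above can misfire because `n ** (1 / 3)` is inexact
# (e.g. it may return 2.9999999999999996 for n = 27). A hedged,
# integer-friendly variant rounds before comparing:
def perfect_cube_rounded(n: int) -> bool:
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


assert perfect_cube_rounded(27) and not perfect_cube_rounded(4)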
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
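

# Classical reference for the circuit above: the XOR qubit is the sum bit and
# the AND qubit the carry, so for inputs (1, 1) the dominant counts key is
# "10" (qiskit prints classical bit 1 = AND first, then bit 0 = XOR).
for b0 in (0, 1):
    for b1 in (0, 1):
        assert (b0 + b1) % 2 == b0 ^ b1   # sum bit
        assert (b0 + b1) // 2 == b0 & b1  # carry bit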
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase__ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : List[Any] = [label.strip() for label in labels.split(',') if label.strip()]
return labels
def __call__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0:
raise ValueError('You must include at least one label and at least one sequence.')
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(_UpperCAmelCase))
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Dict = [sequences]
__A : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_UpperCAmelCase)] for label in labels])
return sequence_pairs, sequences
@add_end_docstrings(a__ )
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase=ZeroShotClassificationArgumentHandler() , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = args_parser
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase)
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail'):
return ind
return -1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=TruncationStrategy.ONLY_FIRST , **_UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`')
__A : Any = self.tokenizer.eos_token
try:
__A : Dict = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , )
except Exception as e:
if "too short" in str(_UpperCAmelCase):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__A : Optional[Any] = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
if kwargs.get('multi_class' , _UpperCAmelCase) is not None:
__A : Union[str, Any] = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.')
__A : List[Any] = {}
if "candidate_labels" in kwargs:
__A : Dict = self._args_parser._parse_labels(kwargs['candidate_labels'])
if "hypothesis_template" in kwargs:
__A : Optional[Any] = kwargs['hypothesis_template']
__A : Tuple = {}
if "multi_label" in kwargs:
__A : Dict = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase , ):
'''simple docstring'''
if len(_UpperCAmelCase) == 0:
pass
elif len(_UpperCAmelCase) == 1 and "candidate_labels" not in kwargs:
__A : Dict = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}')
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase="This example is {}."):
'''simple docstring'''
__A ,__A : int = self._args_parser(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
for i, (candidate_label, sequence_pair) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase)):
__A : Optional[int] = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_UpperCAmelCase) - 1,
**model_input,
}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = inputs['candidate_label']
__A : Any = inputs['sequence']
__A : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
__A : Optional[int] = self.model(**_UpperCAmelCase)
__A : Optional[Any] = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[int] = [outputs['candidate_label'] for outputs in model_outputs]
__A : List[Any] = [outputs['sequence'] for outputs in model_outputs]
__A : int = np.concatenate([output['logits'].numpy() for output in model_outputs])
__A : Optional[Any] = logits.shape[0]
__A : Optional[int] = len(_UpperCAmelCase)
__A : Tuple = N // n
__A : List[Any] = logits.reshape((num_sequences, n, -1))
if multi_label or len(_UpperCAmelCase) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__A : Any = self.entailment_id
__A : List[Any] = -1 if entailment_id == 0 else 0
__A : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
__A : str = np.exp(_UpperCAmelCase) / np.exp(_UpperCAmelCase).sum(-1 , keepdims=_UpperCAmelCase)
__A : Any = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__A : List[str] = reshaped_outputs[..., self.entailment_id]
__A : str = np.exp(_UpperCAmelCase) / np.exp(_UpperCAmelCase).sum(-1 , keepdims=_UpperCAmelCase)
__A : Dict = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
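# ---------------------------------------------------------------------------
# Minimal NumPy sketch (not part of the original file) of the postprocessing
# above, assuming one sequence, three candidate labels, and NLI logits
# ordered [contradiction, neutral, entailment]; all values below are made up.
import numpy as np

logits = np.array([[0.1, 0.2, 2.0],
                   [1.5, 0.3, 0.1],
                   [0.2, 0.1, 0.4]])
reshaped = logits.reshape((1, 3, -1))  # (num_sequences, n_labels, 3)

# multi_label=True: softmax over [contradiction, entailment] for each label,
# giving an independent entailment probability per candidate label.
pair = reshaped[..., [0, 2]]
multi = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
print(multi[..., 1])

# multi_label=False: softmax of the entailment logits across labels, so the
# scores sum to 1 over the candidate labels.
entail = reshaped[..., 2]
single = np.exp(entail) / np.exp(entail).sum(-1, keepdims=True)
print(single)
# ---------------------------------------------------------------------------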
'''simple docstring'''
from collections.abc import Generator
from math import sin
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''08x''' )[-8:]
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = B''''''
for char in message:
bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
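# Illustrative: a 3-byte message contributes 24 bits; after the single '1'
# bit, 423 '0' bits bring the length to 448 mod 512, and the 64-bit
# little-endian length field completes a single 512-bit block.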
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCamelCase ) , 512 ):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase , 2 )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
return (a + b) % 2**32
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
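# Note: the shifted halves share no set bits, so ^ acts like | here and this
# is a standard 32-bit left rotation, e.g. rotating 1 left by 31 gives 2**31.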
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = preprocess(UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67452301
_a = 0Xefcdab89
_a = 0X98badcfe
_a = 0X10325476
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
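# ---------------------------------------------------------------------------
# Illustrative cross-check (not part of the original file): the digest
# produced by the final function above should agree with Python's hashlib for
# the same input. The name `md5_me` is hypothetical -- this dump reuses
# `snake_case_` for every function, so adjust the import to your local name.
import hashlib

message = b"hello world"
reference = hashlib.md5(message).hexdigest()
print(reference)  # 5eb63bbbe01eeed093cb22bb8f5acdc3
# md5_me(message) is expected to equal `reference`.
# ---------------------------------------------------------------------------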
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A ( __UpperCamelCase ) -> List[str]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
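# Illustrative: _is_chinese_char(ord("中")) -> True (CJK Unified Ideographs),
# while _is_chinese_char(ord("あ")) -> False, since Hiragana falls outside
# the ranges checked above.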
def A ( __UpperCamelCase ) -> str:
# word like '180' or '身高' or '神'
for char in word:
A__ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def A ( __UpperCamelCase ) -> str:
A__ = set()
for token in tokens:
A__ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A__ = list(__UpperCamelCase )
return word_list
def A ( __UpperCamelCase , __UpperCamelCase ) -> str:
if not chinese_word_set:
return bert_tokens
A__ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A__ = bert_tokens
A__ , A__ = 0, len(__UpperCamelCase )
while start < end:
A__ = True
if is_chinese(bert_word[start] ):
A__ = min(end - start , __UpperCamelCase )
for i in range(__UpperCamelCase , 1 , -1 ):
A__ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
A__ = '##' + bert_word[j]
A__ = start + i
A__ = False
break
if single_word:
start += 1
return bert_word
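# Illustrative: with bert_tokens=["王", "小", "明"] and
# chinese_word_set={"王小明"}, the greedy longest-match above returns
# ["王", "##小", "##明"], marking trailing subwords of a whole word.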
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = []
for i in range(0 , len(__UpperCamelCase ) , 100 ):
A__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
A__ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A__ = []
for i in range(0 , len(__UpperCamelCase ) , 100 ):
A__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__UpperCamelCase , truncation=__UpperCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A__ = []
for input_ids, chinese_word in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = []
for id in input_ids:
A__ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A__ = add_sub_symbol(__UpperCamelCase , __UpperCamelCase )
A__ = []
# We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A__ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def A ( __UpperCamelCase ) -> List[str]:
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
A__ = f.readlines()
A__ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A__ = LTP(args.ltp )  # faster on a GPU device
A__ = BertTokenizer.from_pretrained(args.bert )
A__ = prepare_ref(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
A__ = [json.dumps(__UpperCamelCase ) + '\n' for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]:
"""simple docstring"""
_a = size if size is not None else {'''height''': 18, '''width''': 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( _a ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = ImageGPTImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ():
'''simple docstring'''
_a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
_a = Image.open(dataset[4]['''file'''] )
_a = Image.open(dataset[5]['''file'''] )
_a = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_a = prepare_images()
# test non-batched
_a = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
_a = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
_a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
_a = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "vision-encoder-decoder"
UpperCAmelCase = True
def __init__( self : Optional[Any] , **_A : Optional[int] ):
super().__init__(**_A )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
_UpperCamelCase = kwargs.pop('''encoder''' )
_UpperCamelCase = encoder_config.pop('''model_type''' )
_UpperCamelCase = kwargs.pop('''decoder''' )
_UpperCamelCase = decoder_config.pop('''model_type''' )
_UpperCamelCase = AutoConfig.for_model(_A , **_A )
_UpperCamelCase = AutoConfig.for_model(_A , **_A )
_UpperCamelCase = True
@classmethod
def UpperCamelCase_ ( cls : Tuple , _A : PretrainedConfig , _A : PretrainedConfig , **_A : Union[str, Any] ):
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
_UpperCamelCase = True
_UpperCamelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.encoder.to_dict()
_UpperCamelCase = self.decoder.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase_ ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self : Tuple ):
return 1e-4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class lowerCAmelCase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = OrderedDict()
_UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
_UpperCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
_UpperCamelCase = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def UpperCamelCase_ ( self : List[str] , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , ):
import torch
_UpperCamelCase = OrderedDict()
_UpperCamelCase = super().generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
_UpperCamelCase , _UpperCamelCase = dummy_input['''input_ids'''].shape
_UpperCamelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCamelCase = dummy_input.pop('''input_ids''' )
_UpperCamelCase = dummy_input.pop('''attention_mask''' )
_UpperCamelCase = torch.zeros(_A )
return common_inputs
class lowerCAmelCase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : Optional[int] ):
pass
def UpperCamelCase_ ( self : Optional[int] , _A : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(_A )
def UpperCamelCase_ ( self : str , _A : PretrainedConfig , _A : PretrainedConfig , _A : str = "default" ):
_UpperCamelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_A , _A )
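# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file), assuming a standard
# transformers install; it exercises the `from_encoder_decoder_configs`
# classmethod defined above.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
# ---------------------------------------------------------------------------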
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_a = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_a = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits
_a = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean()
_a = -(labels.shape[-1] * loss.item())
_a = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase ():
"""simple docstring"""
_a , _a = 9, 14 # noqa: F841
_a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_a = defaultdict(__A)
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost])
adjancency[nodea].append([nodea, cost])
_a = mst(__A)
_a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_a = tuple(answer[:2])
_a = tuple(edge[::-1])
assert edge in result or reverse in result
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ):
'''simple docstring'''
_a = x.device
_a = (x * 255).int().clamp(0 , 255 )
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' )
_a = ((x & mask) != 0).float()
_a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' )
_a = bits * 2 - 1
return bits
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ):
'''simple docstring'''
_a = x.device
_a = (x > 0).int()
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 )
_a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
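# Round-trip note (illustrative): for image tensors x in [0, 1],
# bits_to_decimal(decimal_to_bits(x)) recovers x up to the 1/255 quantization
# introduced by the 8-bit encoding above.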
def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail for a full understanding.
# Notation: (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_a = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_a = self.alphas_cumprod[timestep]
_a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_a = self._get_variance(UpperCamelCase , UpperCamelCase )
_a = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu'''
_a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
_a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
_a = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_a = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
_a = None
# 1. compute alphas, betas
_a = self.alphas_cumprod[t]
_a = self.alphas_cumprod[t - 1] if t > 0 else self.one
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_a = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_a = 0
if t > 0:
_a = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
_a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int:
"""simple docstring"""
super().__init__()
_a = bit_scale
_a = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_a = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_a = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_a = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
lowerCamelCase__ : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A ( _a ):
lowercase_ = 'roformer'
def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size if embedding_size is None else embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = rotary_value
_a = use_cache
class A ( _a ):
@property
def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape
__lowerCamelCase : Dict = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
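# Illustrative: in the upsampling block above, an input of shape (1, 8, 8, C)
# is resized to (1, 16, 16, C) by nearest-neighbor interpolation before the
# 3x3 convolution is applied.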
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int = None
lowerCamelCase : float = 0.0
lowerCamelCase : bool = None
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : Tuple = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
__lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : int = nn.Dropout(self.dropout_prob )
__lowerCamelCase : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__lowerCamelCase : List[Any] = None
if use_nin_shortcut:
__lowerCamelCase : Any = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
__lowerCamelCase : List[Any] = hidden_states
__lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 )
__lowerCamelCase : Optional[int] = hidden_states + temb
__lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ )
if self.conv_shortcut is not None:
__lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ )
return hidden_states + residual
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
_a = [[] for _ in range(lowerCAmelCase_ )]
_a = size
def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self._size
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None:
"""simple docstring"""
_a = deque([start_vertex] )
_a = [None] * self.size
_a = 0
while queue:
_a = queue.popleft()
_a = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_a = current_distance + edge.weight
_a = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and new_distance >= dest_vertex_distance
):
continue
_a = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
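# ---------------------------------------------------------------------------
# Self-contained sketch (not part of the original file) of the 0-1 BFS idea
# used above: 0-weight edges go to the front of the deque and 1-weight edges
# to the back, so vertices come off the deque in nondecreasing distance order.
from collections import deque

def zero_one_bfs(adj, start):
    # adj[u] is a list of (v, w) pairs with w in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

# Example graph: 0 -(0)-> 1, 0 -(1)-> 2, 1 -(1)-> 2
print(zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0))  # [0, 0, 1]
# ---------------------------------------------------------------------------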
import collections
import os
import re
from pathlib import Path
a__ = '''src/transformers'''
# Matches is_xxx_available()
a__ = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
a__ = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a__ = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
a__ = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
a__ = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a__ = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
a__ = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
a__ = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
a__ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
a__ = re.compile(R'''^\s*try:''')
# Catches a line with else:
a__ = re.compile(R'''^\s*else:''')
def __UpperCAmelCase ( __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if _re_test_backend.search(__a ) is None:
return None
_a : List[str] = [b[0] for b in _re_backend.findall(__a )]
backends.sort()
return "_and_".join(__a )
def __UpperCAmelCase ( __a : Optional[int] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : List[str] = f.readlines()
_a : List[str] = 0
while line_index < len(__a ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__a ):
return None
# First grab the objects without a specific backend in _import_structure
_a : List[Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_a : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__a ):
_a : Optional[Any] = _re_one_line_import_struct.search(__a ).groups()[0]
_a : Optional[int] = re.findall(R'''\[([^\]]+)\]''' ,__a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_a : Optional[Any] = _re_import_struct_key_value.search(__a )
if single_line_import_search is not None:
_a : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__a ) > 0]
objects.extend(__a )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_a : int = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_a : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_a : str = lines[line_index]
if _re_import_struct_add_one.search(__a ) is not None:
objects.append(_re_import_struct_add_one.search(__a ).groups()[0] )
elif _re_import_struct_add_many.search(__a ) is not None:
_a : Any = _re_import_struct_add_many.search(__a ).groups()[0].split(''', ''' )
_a : List[Any] = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_between_brackets.search(__a ) is not None:
_a : List[Any] = _re_between_brackets.search(__a ).groups()[0].split(''', ''' )
_a : Dict = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_quote_object.search(__a ) is not None:
objects.append(_re_quote_object.search(__a ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_a : Any = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_a : str = []
while (
line_index < len(__a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_a : Union[str, Any] = lines[line_index]
_a : Optional[int] = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_a : Optional[Any] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__a ):
# If the line is an if is_backend_available, we grab all objects associated.
_a : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_a : Tuple = lines[line_index]
_a : Optional[Any] = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_a : Optional[int] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> Tuple:
"""simple docstring"""
def find_duplicates(__a : str ):
return [k for k, v in collections.Counter(__a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_a : Union[str, Any] = []
for key in import_dict_objects.keys():
_a : Tuple = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_a : Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_a : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
_a : Optional[Any] = os.path.join(__a ,'''__init__.py''' )
_a : Union[str, Any] = parse_init(__a )
if objects is not None:
_a : Optional[int] = analyze_results(*__a )
if len(__a ) > 0:
_a : int = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__a ) )
if len(__a ) > 0:
raise ValueError('''\n\n'''.join(__a ) )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Dict = []
for path, directories, files in os.walk(__a ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__a ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_a : Optional[Any] = str((Path(__a ) / folder).relative_to(__a ) )
_a : int = short_path.replace(os.path.sep ,'''.''' )
submodules.append(__a )
for fname in files:
if fname == "__init__.py":
continue
_a : List[Any] = str((Path(__a ) / fname).relative_to(__a ) )
_a : Dict = short_path.replace('''.py''' ,'''''' ).replace(os.path.sep ,'''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__a )
return submodules
a__ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
from transformers.utils import direct_transformers_import
_a : str = direct_transformers_import(__a )
_a : Dict = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we parse the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(__a ,'''__init__.py''' ) ,'''r''' ) as f:
_a : int = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' ,__a ) ) )
_a : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__a ) > 0:
_a : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
'''simple docstring'''
from math import pi, sqrt
def gamma (num : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
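# Worked examples: gamma(4) = 3! = 6.0, and gamma(1.5) = 0.5 * gamma(0.5)
# = 0.5 * sqrt(pi) ≈ 0.8862.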
def test_gamma ():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
def factorial ( num : int ) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add ( number : int ) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution ( num : int = 100 ) -> int:
    """simple docstring"""
    fact = factorial(num )
    result = split_and_add(fact )
return result
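# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27.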
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : Tuple = get_tests_dir('fixtures')
__A : int = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__A : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = 0
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase ).to_dict()
config_dict.pop("feature_extractor_type" )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(**__lowerCamelCase )
# save in new folder
model_config.save_pretrained(__lowerCamelCase )
config.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained("bert-base" )
def _snake_case ( self : Any ):
with self.assertRaisesRegex(
__lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def _snake_case ( self : str ):
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def _snake_case ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def _snake_case ( self : Optional[Any] ):
try:
AutoConfig.register("custom" , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self : Dict ):
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = True
try:
AutoConfig.register("custom" , __lowerCamelCase )
AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__lowerCamelCase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] | 16 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction matches the normalized reference, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one of their references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
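# Worked examples for the exact-match helper above (values follow directly from
# normalize_answer, which lowercases and strips punctuation and articles):
#   compute_em(predictions=["the cat sat"], references=[["The cat sat!"]])  -> 100.0
#   compute_em(predictions=["About 95 you now get in ."],
#              references=[["About 95 species are currently known ."]])    -> 0.0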
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize and tokenize a sentence before scoring."""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
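# A minimal end-to-end sketch of the helpers above, reusing the example quoted in
# _KWARGS_DESCRIPTION (assumes sacrebleu and sacremoses are installed):
if __name__ == "__main__":
    example_sources = ["About 95 species are currently accepted ."]
    example_predictions = ["About 95 you now get in ."]
    example_references = [["About 95 species are currently known ."]]
    print(compute_sari(example_sources, example_predictions, example_references))  # ~21.806
    print(compute_sacrebleu(example_predictions, example_references))  # ~14.536
    print(compute_em(example_predictions, example_references))  # 0.0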
| 22 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : int = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Dict = {
'''allenai/led-base-16384''': 16_384,
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Dict = LEDTokenizer
_lowercase : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , __A : Dict=None , __A : Tuple=None , __A : Optional[int]=None , __A : Any="replace" , __A : Any="<s>" , __A : Any="</s>" , __A : List[str]="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Optional[int]="<pad>" , __A : List[Any]="<mask>" , __A : List[Any]=False , __A : Tuple=True , **__A : Optional[Any] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
__A : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
__A : int = getattr(__A , pre_tok_state.pop("""type""" ) )
__A : Optional[Any] = add_prefix_space
__A : Optional[int] = pre_tok_class(**__A )
__A : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__A : Optional[int] = """post_processor"""
__A : Tuple = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
__A : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__A : Dict = tuple(state["""sep"""] )
if "cls" in state:
__A : Optional[Any] = tuple(state["""cls"""] )
__A : Optional[Any] = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
__A : Tuple = add_prefix_space
__A : Union[str, Any] = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
__A : int = trim_offsets
__A : Optional[Any] = True
if changes_to_apply:
__A : Union[str, Any] = getattr(__A , state.pop("""type""" ) )
__A : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase_ ( self : List[str] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
__A : List[Any] = value
def lowerCAmelCase_ ( self : int , *__A : Optional[int] , **__A : Optional[int] ):
__A : Any = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def lowerCAmelCase_ ( self : List[str] , *__A : Tuple , **__A : Optional[Any] ):
__A : Tuple = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def lowerCAmelCase_ ( self : Dict , __A : str , __A : Optional[str] = None ):
__A : List[Any] = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def lowerCAmelCase_ ( self : int , __A : int , __A : Tuple=None ):
__A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : str , __A : List[int] , __A : Optional[List[int]] = None ):
__A : Dict = [self.sep_token_id]
__A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : Tuple , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
__A : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
__A : Dict = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
__A : Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(__A )
if needs_to_be_padded:
__A : Any = len(__A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__A : int = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__A : List[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
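# Usage sketch for the global-attention padding logic above (this class corresponds
# to transformers' LEDTokenizerFast; the checkpoint name assumes hub access):
#
#   from transformers import LEDTokenizerFast
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer(["Summary of the text.", "Another, much longer summary of the text."])
#   enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
#   padded = tokenizer.pad(enc)
#   # shorter sequences get -1 appended to global_attention_mask, per _pad above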
| 17 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
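# A quick sanity check for the converters above (a sketch; assumes torch is
# installed, since pt_to_pil expects a torch tensor in [-1, 1] with NCHW layout):
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 8, 8) * 2 - 1  # 2 RGB images, values in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    print(len(pil_images), pil_images[0].size)  # -> 2 (8, 8)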
| 22 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase = ""
else:
_lowerCAmelCase = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_lowerCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase = in_proj_bias[: config.hidden_size]
_lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = val
def __a():
'''simple docstring'''
_lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
_lowerCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowerCAmelCase = 1000
_lowerCAmelCase = "huggingface/label-files"
_lowerCAmelCase = "imagenet-1k-id2label.json"
_lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = int(deit_name[-6:-4] )
_lowerCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
_lowerCAmelCase = 192
_lowerCAmelCase = 768
_lowerCAmelCase = 12
_lowerCAmelCase = 3
elif deit_name[9:].startswith("small" ):
_lowerCAmelCase = 384
_lowerCAmelCase = 1536
_lowerCAmelCase = 12
_lowerCAmelCase = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
_lowerCAmelCase = 1024
_lowerCAmelCase = 4096
_lowerCAmelCase = 24
_lowerCAmelCase = 16
# load original model from timm
_lowerCAmelCase = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase = timm_model.state_dict()
_lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
_lowerCAmelCase = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowerCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowerCAmelCase = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE_ , crop_size=config.image_size )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase = encoding["pixel_values"]
_lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
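    # Example invocation (script and folder names are illustrative; requires timm
    # and an internet connection to download the original weights):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224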
| 18 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a message to a Slack incoming webhook; raise on non-200 responses."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
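    # A real webhook URL has the shape below (placeholder IDs, not a working hook):
    #   https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX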
| 22 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
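    # Typical invocation (script name and downstream flags are illustrative):
    #   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...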
| 19 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
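# Self-contained sketch of the post-processing step above, outside the class
# (hypothetical shapes: 19 classes, 32x32 logits upsampled to 128x128):
if __name__ == "__main__":
    import torch

    fake_logits = torch.randn(1, 19, 32, 32)  # (batch, num_labels, height, width)
    resized = torch.nn.functional.interpolate(
        fake_logits, size=(128, 128), mode="bilinear", align_corners=False
    )
    segmentation_map = resized[0].argmax(dim=0)  # (128, 128) tensor of label ids
    print(segmentation_map.shape)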
| 22 | 0 |
def solution(limit: int = 100_0000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit using a sieve.

    This equals the number of reduced proper fractions with denominator <= limit.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve phi over its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
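    # Hand-checked small case: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) == 21,
    # the number of reduced proper fractions with denominator <= 8.
    assert solution(8) == 21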
| 20 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(UpperCamelCase )
return results
if __name__ == "__main__":
main()
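    # Example invocation (file names and the script name are illustrative):
    #   python run_tf_text_classification.py \
    #       --model_name_or_path bert-base-uncased \
    #       --train_file train.csv --dev_file dev.csv \
    #       --label_column_id 0 \
    #       --output_dir ./model --do_train --do_eval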
| 22 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
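# debug_launcher runs the target function in multiple CPU processes; a variant
# could raise the process count explicitly (a sketch; the default is 2):
#   debug_launcher(test_script.main, num_processes=4)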
| 21 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 22 | 0 |
import numpy
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ) -> None:
UpperCamelCase_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
UpperCamelCase_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
UpperCamelCase_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
UpperCamelCase_ = numpy.random.rand(3 , 1 )
# Real output values provided.
UpperCamelCase_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
UpperCamelCase_ = numpy.zeros(output_array.shape )
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
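# Worked check (added illustration): the layers cache sigmoid *outputs*, so
# sigmoid_derivative receives s = sigmoid(x) and returns s * (1 - s);
# e.g. sigmoid(0) = 0.5 and sigmoid_derivative(0.5) = 0.25.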
def example() -> int:
    # Input values (a three-input truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 23 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict) == len(split_dict_yaml_list)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
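# Round-trip sketch of the API exercised above (illustrative, based only on the
# behaviour these tests assert):
#
#   split_dict = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   yaml_list = split_dict._to_yaml_list()          # dataset_name is dropped from the dump
#   assert SplitDict._from_yaml_list(yaml_list)["train"].num_examples == 42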
| 22 | 0 |
import os
def solution() -> int:
    """
    Finds the maximum total in the triangle read from triangle.txt, moving from
    the top of the triangle to adjacent numbers on the row below.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
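# Worked example (added, not part of the original solution): for the triangle
#   3
#   7 4
#   2 4 6
# the accumulated rows become [3], [10, 7], [12, 14, 13], so the best path total is 14.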
if __name__ == "__main__":
print(solution())
| 24 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self) -> None:
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
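# Illustration (added, not part of the test file): the checker keys off comments of
# the form below inside the library source and verifies the decorated class matches
# the referenced original, optionally applying the `with A->B` rename:
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#   class TestSchedulerOutput(BaseOutput):
#       ...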
| 22 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 25 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
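# Illustration (added): for a sentence pair, build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences above line up as
#
#   tokens:          [CLS] a1 a2 [SEP] b1 b2 [SEP]
#   token_type_ids:    0   0  0    0   1  1    1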
| 22 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
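# Illustration (added): for a 6-token dummy sequence, the export-time global attention
# mask built above marks every second position as global: [[1, 0, 1, 0, 1, 0], ...].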
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class A(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 22 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
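# Flow note (added): batch_decode receives the (char, bpe, wp) logits triple produced
# by the MGP-STR model and keeps, per sample, the string from whichever decoding
# branch has the highest cumulative softmax confidence.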
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
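# Note (added): with the structure above, importing this package is cheap; a submodule
# such as `modeling_layoutlmv3` is only loaded when an attribute like `LayoutLMv3Model`
# is first accessed through the `_LazyModule` proxy.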
| 22 | 0 |
def factorial(num: int) -> int:
    """Find the factorial of the given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    fact = factorial(num)
    result = split_and_add(fact)
    return result
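# Worked check (added): 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 = 27,
# i.e. solution(10) == 27.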
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 28 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 22 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
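# Worked example (added): for a 480x640 input and a 384x384 target with
# keep_aspect_ratio=True and multiple=32, the scales are 0.8 (height) and 0.6 (width);
# 0.8 deviates less from 1, so both sides use 0.8, giving 384x512 (multiples of 32).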
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 29 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
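# Note (added): xgboost() returns an (n, 1) column vector because of the reshape
# above; sklearn's regression metrics accept that shape alongside the 1-D y_test.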
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
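# Example (added): for model_name "xclip-base-patch32-16-frames" the parsing above
# finds "patch" and reads the next two characters, giving patch_size = 32.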
def rename_key(name):
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
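# Example (added): rename_key("visual.transformer.resblocks.0.attn.out_proj.weight")
# -> "vision_model.encoder.layers.0.self_attn.out_proj.weight".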
def convert_state_dict(orig_state_dict, config):
    # The destination key names below follow the HF X-CLIP module layout
    # (vision_model / mit / text_model encoders with split q/k/v projections).
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if num_frames == 8:
UpperCAmelCase_ : Tuple = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
UpperCAmelCase_ : Union[str, Any] = '''eating_spaghetti.npy'''
    elif num_frames == 32:
        UpperCAmelCase_ : Optional[Any] = '''eating_spaghetti_32_frames.npy'''
    else:
        raise ValueError(f'''No sample video is available for num_frames={num_frames}.''' )
UpperCAmelCase_ : List[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=_lowercase , repo_type='''dataset''' , )
UpperCAmelCase_ : Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase=None , _lowercase=False ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
UpperCAmelCase_ : Optional[int] = model_to_url[model_name]
UpperCAmelCase_ : Dict = 8
if "16-frames" in model_name:
UpperCAmelCase_ : List[Any] = 16
elif "shot" in model_name:
UpperCAmelCase_ : Tuple = 32
UpperCAmelCase_ : Any = get_xclip_config(_lowercase , _lowercase )
UpperCAmelCase_ : str = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : int = '''pytorch_model.bin'''
gdown.cached_download(_lowercase , _lowercase , quiet=_lowercase )
UpperCAmelCase_ : Union[str, Any] = torch.load(_lowercase , map_location='''cpu''' )['''model''']
else:
UpperCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowercase )['''model''']
UpperCAmelCase_ : Dict = convert_state_dict(_lowercase , _lowercase )
UpperCAmelCase_ : int = XCLIPModel(_lowercase )
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = model.load_state_dict(_lowercase , strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : Optional[Any] = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
UpperCAmelCase_ : List[str] = VideoMAEImageProcessor(size=_lowercase )
UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ : List[Any] = XCLIPProcessor(image_processor=_lowercase , tokenizer=_lowercase )
UpperCAmelCase_ : Dict = prepare_video(_lowercase )
UpperCAmelCase_ : List[str] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=_lowercase , return_tensors='''pt''' , padding=_lowercase )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : str = model(**_lowercase )
# Verify outputs
UpperCAmelCase_ : Dict = outputs.logits_per_video
UpperCAmelCase_ : Union[str, Any] = logits_per_video.softmax(dim=1 )
print('''Probs:''' , _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : Any = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : List[Any] = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : str = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : Dict = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(_lowercase , organization='''nielsr''' )
processor.push_to_hub(_lowercase , organization='''nielsr''' )
slow_tokenizer.push_to_hub(_lowercase , organization='''nielsr''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import qiskit
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = qiskit.Aer.get_backend('''aer_simulator''' )
_a = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
_a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Tuple = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
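def decode_half_adder_counts(counts: dict) -> tuple[int, int]:
    # Added illustrative helper (not part of the original script): pick the
    # most frequent bitstring from the counts above. Qiskit orders classical
    # bits most-significant first, so the first character is the AND (carry)
    # bit measured from qubit 3 and the second is the XOR (sum) bit from
    # qubit 2; e.g. {'10': 1000} for inputs (1, 1) decodes to sum=0, carry=1.
    bitstring = max(counts, key=counts.get)
    return int(bitstring[1]), int(bitstring[0])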
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any=8 ) -> Dict:
SCREAMING_SNAKE_CASE_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
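def _demo_downscale() -> None:
    # Added illustrative sketch (not in the original file). The helper above
    # (its def is name-mangled in this dump; the pipeline below would call it
    # as `downscale_height_and_width`) returns the latent grid size,
    # ceil(size / scale_factor**2) * scale_factor.
    assert downscale_height_and_width(768, 768, 8) == (96, 96)  # decodes back to 768
    assert downscale_height_and_width(300, 300, 8) == (40, 40)  # decodes to a 320 output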
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : UNetaDConditionModel , _lowerCAmelCase : DDPMScheduler , _lowerCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
if latents is None:
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ = latents.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Any=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : int ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self : Any , _lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 100 , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
SCREAMING_SNAKE_CASE_ = self._execution_device
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.unet.config.in_channels
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE_ = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
SCREAMING_SNAKE_CASE_ = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE_ = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=_lowerCAmelCase )
'''simple docstring'''
from collections.abc import Generator
from math import sin
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''08x''' )[-8:]
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = B''''''
for char in message:
bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCamelCase ) , 512 ):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase , 2 )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
return (a + b) % 2**32
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = preprocess(UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67452301
_a = 0Xefcdab89
_a = 0X98badcfe
_a = 0X10325476
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
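def _demo_md5_cross_check(message: bytes = b"hello world") -> None:
    # Added illustrative sketch (not in the original file). Every helper in
    # this dump is name-mangled to `snake_case_`, so `md5_me` below stands
    # for the de-obfuscated digest entry point defined last above; it returns
    # the digest as a bytestring of hex characters, matching hashlib.
    import hashlib
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")  # noqa: S324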
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[float, list[float]]:
"""simple docstring"""
_UpperCAmelCase = list(range(len(SCREAMING_SNAKE_CASE_ ) ) )
_UpperCAmelCase = [v / w for v, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
index.sort(key=lambda SCREAMING_SNAKE_CASE_ : ratio[i] , reverse=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] * len(SCREAMING_SNAKE_CASE_ )
for i in index:
if weight[i] <= capacity:
_UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
_UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
    doctest.testmod()
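def _demo_fractional_knapsack() -> None:
    # Added illustrative sketch (not in the original file): the textbook
    # 3-item instance for the greedy routine above, whose name and parameters
    # are mangled in this dump; `fractional_knapsack(value, weight, capacity)`
    # stands for its de-obfuscated form. The optimum takes items 1 and 2
    # whole and 2/3 of item 3 for a total value of 240.0.
    best, taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert abs(best - 240.0) < 1e-9
    assert taken[:2] == [1, 1] and abs(taken[2] - 2 / 3) < 1e-9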
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]:
"""simple docstring"""
_a = size if size is not None else {'''height''': 18, '''width''': 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( _a ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = ImageGPTImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ():
'''simple docstring'''
_a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
_a = Image.open(dataset[4]['''file'''] )
_a = Image.open(dataset[5]['''file'''] )
_a = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_a = prepare_images()
# test non-batched
_a = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
_a = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
_a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
_a = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
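def _demo_nearest_cluster() -> None:
    # Added illustrative sketch (not part of the test file): ImageGPT
    # quantizes each normalized pixel to the index of its nearest color
    # cluster, which is what turns images into the token ids checked above.
    # The cluster and pixel values below are made up.
    clusters = np.asarray([[0.5, 0.5, 0.5], [-0.5, -0.5, -0.5]])
    pixel = np.asarray([0.4, 0.6, 0.5])
    distances = ((clusters - pixel) ** 2).sum(axis=1)
    assert int(distances.argmin()) == 0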
import argparse
import hashlib  # hashlib is only used in the self-test below
import struct
class __magic_name__ :
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:List[str] ):
snake_case__ = data
snake_case__ = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:Dict , _a:List[str] ):
return ((n << b) | (n >> (32 - b))) & 0XFF_FFF_FFF
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
snake_case__ = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Optional[Any] ):
snake_case__ = list(struct.unpack('''>16L''' , _a ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.padding()
snake_case__ = self.split_blocks()
for block in self.blocks:
snake_case__ = self.expand_block(_a )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case__ = (b & c) | ((~b) & d)
snake_case__ = 0X5A_827_999
elif 20 <= i < 40:
snake_case__ = b ^ c ^ d
snake_case__ = 0X6E_D9E_BA1
elif 40 <= i < 60:
snake_case__ = (b & c) | (b & d) | (c & d)
snake_case__ = 0X8F_1BB_CDC
elif 60 <= i < 80:
snake_case__ = b ^ c ^ d
snake_case__ = 0XCA_62C_1D6
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = (
self.rotate(_a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
a,
self.rotate(_a , 30 ),
c,
d,
)
snake_case__ = (
self.h[0] + a & 0XFF_FFF_FFF,
self.h[1] + b & 0XFF_FFF_FFF,
self.h[2] + c & 0XFF_FFF_FFF,
self.h[3] + d & 0XFF_FFF_FFF,
self.h[4] + e & 0XFF_FFF_FFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = B'''Test String'''
assert SHAaHash(__lowerCAmelCase ).final_hash() == hashlib.shaa(__lowerCAmelCase ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
snake_case__ = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
snake_case__ = parser.parse_args()
snake_case__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
snake_case__ = f.read()
else:
snake_case__ = bytes(__lowerCAmelCase , '''utf-8''' )
print(SHAaHash(__lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
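def _demo_sha1_known_vector() -> None:
    # Added illustrative sketch (not in the original file): the classic
    # FIPS 180 "abc" test vector, using `SHAaHash` -- the name the self-test
    # above uses for the (name-mangled) class definition.
    assert SHAaHash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"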
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_a = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_a = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits
_a = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean()
_a = -(labels.shape[-1] * loss.item())
_a = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
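        # Added illustrative note (not part of the test): EXPECTED_SCORE is a
        # sequence log-likelihood; the mean per-token cross-entropy computed
        # above is scaled back by the target length and negated,
        # score = -(seq_len * mean_loss).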
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir('fixtures/test_sentencepiece.model')
SCREAMING_SNAKE_CASE_ = {'target_lang': 'fi', 'source_lang': 'en'}
SCREAMING_SNAKE_CASE_ = '>>zh<<'
SCREAMING_SNAKE_CASE_ = 'Helsinki-NLP/'
if is_torch_available():
SCREAMING_SNAKE_CASE_ = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = 'tf'
else:
SCREAMING_SNAKE_CASE_ = 'jax'
@require_sentencepiece
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = MarianTokenizer
A_ = False
A_ = True
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().setUp()
UpperCamelCase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = Path(self.tmpdirname)
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''vocab'''])
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''source_spm'''])
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES['''target_spm'''])
UpperCamelCase = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = '''</s>'''
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_) , lowerCamelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_) , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<pad>''')
self.assertEqual(len(lowerCamelCase_) , 9)
def UpperCAmelCase__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de')
UpperCamelCase = en_de_tokenizer(['''I am a small frog'''] , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(lowerCamelCase_ , batch.input_ids[0])
UpperCamelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = [x.name for x in Path(lowerCamelCase_).glob('''*''')]
self.assertIn('''source.spm''' , lowerCamelCase_)
MarianTokenizer.from_pretrained(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(
['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0))
@slow
def UpperCAmelCase__ ( self) -> List[str]:
# fmt: off
UpperCamelCase = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''')
UpperCamelCase = '''Tämä on testi'''
UpperCamelCase = '''This is a test'''
UpperCamelCase = [7_6, 7, 2_0_4_7, 2]
UpperCamelCase = [6_9, 1_2, 1_1, 9_4_0, 2]
UpperCamelCase = tokenizer(lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer(text_target=lowerCamelCase_).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
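# Added illustrative note (not part of the test file): multi-target Marian
# checkpoints expect a language token such as the '>>zh<<' constant defined
# above to be prepended to the source text, e.g.
#   tokenizer(">>fr<< this is a sentence in english that we will translate")
# so the model knows which target language to produce.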
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ):
'''simple docstring'''
_a = x.device
_a = (x * 255).int().clamp(0 , 255 )
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' )
_a = ((x & mask) != 0).float()
_a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' )
_a = bits * 2 - 1
return bits
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ):
'''simple docstring'''
_a = x.device
_a = (x > 0).int()
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 )
_a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
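def _demo_bit_roundtrip() -> None:
    # Added illustrative sketch (not in the original file): encoding an image
    # tensor to {-1, 1} bit planes and decoding back is lossless up to 8-bit
    # quantization. The pipeline below calls the two helpers above as
    # `decimal_to_bits` / `bits_to_decimal`; their defs are name-mangled here.
    x = torch.rand(1, 3, 8, 8)
    bits = decimal_to_bits(x)  # shape (1, 24, 8, 8), values in {-1, 1}
    restored = bits_to_decimal(bits)  # back to (1, 3, 8, 8), values in [0, 1]
    assert torch.allclose(restored, (x * 255).int().float() / 255)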
def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_a = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_a = self.alphas_cumprod[timestep]
_a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_a = self._get_variance(UpperCamelCase , UpperCamelCase )
_a = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu'''
_a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
_a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
_a = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_a = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
_a = None
# 1. compute alphas, betas
_a = self.alphas_cumprod[t]
_a = self.alphas_cumprod[t - 1] if t > 0 else self.one
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_a = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_a = 0
if t > 0:
_a = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
_a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int:
"""simple docstring"""
super().__init__()
_a = bit_scale
_a = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_a = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_a = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_a = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a ( A__ ) -> int:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
a_ :Optional[int] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_lowercase , required=_lowercase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_lowercase , required=_lowercase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_lowercase , required=_lowercase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_lowercase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_lowercase , default=_lowercase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_lowercase )
def __init__( self : Any , _lowercase : str , _lowercase : str , _lowercase : str , _lowercase : str , _lowercase : str , *_lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f"""Loading model {model_type}""" )
SCREAMING_SNAKE_CASE__ : Dict = model_type
SCREAMING_SNAKE_CASE__ : Any = tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] = pytorch_dump_output
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config
SCREAMING_SNAKE_CASE__ : int = finetuning_task_name
def lowercase__ ( self : Any ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE__ : int = self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : Optional[int] = ''''''
else:
SCREAMING_SNAKE_CASE__ : int = self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, t5, gpt, gpt2, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
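# Added illustrative command-line usage for the converter above (all paths
# are hypothetical placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin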
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A ( _a ):
lowercase_ = 'roformer'
def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_a = vocab_size
_a = hidden_size if embedding_size is None else embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = rotary_value
_a = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
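# Hedged usage sketch (not part of the original module; values are illustrative):
#   config = RoFormerConfig(vocab_size=50_000, rotary_value=True)
#   config.embedding_size == config.hidden_size  # True when embedding_size is left as None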
| 22 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'nat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
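# Hedged usage sketch (not part of the original module; arguments are illustrative):
#   config = NatConfig(out_features=['stage2', 'stage4'])
#   config.num_layers == len(config.depths)            # True
#   config.hidden_size == 64 * 2 ** 3                  # embed_dim * 2**(num_stages - 1) = 512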
| 36 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: zero-weight edges go to the front of the deque and
        # one-weight edges to the back, so vertices are popped in
        # non-decreasing distance order.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
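    # Hedged usage sketch (not from the original file): a tiny illustrative 0-1 graph.
    graph = AdjacencyList(4)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 2, 0)
    print(graph.get_shortest_path(0, 2))  # both routes cost 1, so this prints 1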
| 22 | 0 |
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors, e.g. 6 == 1 + 2 + 3.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 37 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute Gamma(num) for positive integers and half-integers."""
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
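# Worked example (illustrative, not from the original file):
# gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234, via the recursion above.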
| 22 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 38 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True, )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 22 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
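# Hedged usage sketch (not part of the original module; requires network access):
#   tok = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')
#   tok('Hello world')['input_ids']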
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
        return re.sub(regex, ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(''' ''')
    c1grams = csent.split(''' ''')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(''' ''')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('''Sources length must match predictions and references lengths.''')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False, ):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence'''), id='''references'''),
                } ), codebase_urls=[
                '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
                '''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
                '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
                '''https://github.com/mjpost/sacreBLEU''',
            ], reference_urls=[
                '''https://www.aclweb.org/anthology/Q16-1029.pdf''',
                '''https://github.com/mjpost/sacreBLEU''',
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ], )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'''exact''': compute_em(predictions=predictions, references=references)})
        return result
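# Hedged usage sketch mirroring the docstring example above (values taken from it):
#   compute_sari(sources=['About 95 species are currently accepted .'],
#                predictions=['About 95 you now get in .'],
#                references=[['About 95 species are currently known .']])  # ~21.8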
| 22 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over an RFC 3526 MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
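    # Hedged usage sketch (not from the original file): a full exchange in group 14.
    alice = DiffieHellman()
    bob = DiffieHellman()
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b  # both sides derive the same SHA-256 digest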
| 40 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    '''Convert a torch image tensor in [-1, 1] to a list of PIL images.'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''Convert a numpy image or a batch of images to a list of PIL images.'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
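# Hedged usage sketch (not part of the original module; assumes torch is available):
#   import torch
#   images = torch.rand(2, 3, 64, 64) * 2 - 1   # fake model output in [-1, 1]
#   pil_images = pt_to_pil(images)              # list of two 64x64 PIL images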
| 22 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Count the words in words.txt whose "word value" (sum of letter positions,
    A=1 .. Z=26) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, '''words.txt''')

    words = ''''''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('''"''') for word in words.strip('''\r\n''').split(''',''')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
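    # Worked example (illustrative): "SKY" -> 19 + 11 + 25 = 55 = t(10), so "SKY" counts.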
| 41 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
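# Hedged usage sketch (not part of the original module; file name is illustrative):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   tool(image=Image.open('photo.png'), question='How many cats are there?')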
| 42 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the model-specific class name is not recoverable from this snippet, so a
# generic name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
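# Hedged usage sketch (not part of the original module; file name is illustrative):
#   from PIL import Image
#   processor = ImageProcessor()
#   batch = processor(images=Image.open('photo.png'), return_tensors='pt')
#   batch['pixel_values'].shape  # (1, 3, 224, 224) with the defaults above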
| 22 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting'''
lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = num_samples * [init_image]
lowercase__ = num_samples * [mask_image]
lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = pipeline(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ )
lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
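# A minimal sketch of the replicate/shard pattern the test above relies on:
# `replicate` copies parameters to every device, `shard` folds the global batch
# into a leading device axis, and a pmapped function runs once per device.
# Shapes are illustrative assumptions, not tied to the pipeline.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4, 4))}
replicated_params = replicate(params)                         # one parameter copy per device
sharded_batch = shard(jnp.ones((jax.device_count() * 2, 4)))  # -> (n_devices, 2, 4)

@jax.pmap
def apply_fn(p, x):
    return x @ p["w"]

print(apply_fn(replicated_params, sharded_batch).shape)  # (n_devices, 2, 4)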
| 43 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
            UpperCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        f'16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid ) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(UpperCamelCase )
return results
if __name__ == "__main__":
main()
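# A minimal sketch of the `tf.data.Dataset.from_generator` pattern used in
# `get_tfds` above: the generator yields a (dict of token ids, integer label)
# pair. The concrete dtypes and feature names below are assumptions matching
# that structure.
import tensorflow as tf

def gen():
    yield {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1]}, 0

demo_ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
)
for features, label in demo_ds.take(1):
    print(features["input_ids"].numpy(), int(label))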
| 22 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def A_ ( _lowerCAmelCase : bytes , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = F'{sampling_rate}'
_lowerCamelCase : List[str] = "1"
_lowerCamelCase : Union[str, Any] = "f32le"
_lowerCamelCase : Union[str, Any] = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(_lowerCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_lowerCamelCase : Dict = ffmpeg_process.communicate(_lowerCAmelCase )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
_lowerCamelCase : List[Any] = output_stream[0]
    _lowerCamelCase : Any = np.frombuffer(_lowerCAmelCase , np.float32 )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : str = "f32le" , ):
"""simple docstring"""
_lowerCamelCase : str = F'{sampling_rate}'
_lowerCamelCase : Optional[int] = "1"
if format_for_conversion == "s16le":
_lowerCamelCase : List[str] = 2
elif format_for_conversion == "f32le":
_lowerCamelCase : Union[str, Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
_lowerCamelCase : List[Any] = platform.system()
if system == "Linux":
_lowerCamelCase : Dict = "alsa"
_lowerCamelCase : int = "default"
elif system == "Darwin":
_lowerCamelCase : str = "avfoundation"
_lowerCamelCase : Optional[int] = ":0"
elif system == "Windows":
_lowerCamelCase : Tuple = "dshow"
_lowerCamelCase : Tuple = "default"
_lowerCamelCase : Union[str, Any] = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
_lowerCamelCase : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_lowerCamelCase : Any = _ffmpeg_stream(_lowerCAmelCase , _lowerCAmelCase )
for item in iterator:
yield item
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Union[Tuple[float, float], float]] = None , _lowerCAmelCase : str = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
_lowerCamelCase : Any = stream_chunk_s
else:
_lowerCamelCase : int = chunk_length_s
_lowerCamelCase : int = ffmpeg_microphone(_lowerCAmelCase , _lowerCAmelCase , format_for_conversion=_lowerCAmelCase )
if format_for_conversion == "s16le":
        _lowerCamelCase : int = np.int16
_lowerCamelCase : Union[str, Any] = 2
elif format_for_conversion == "f32le":
        _lowerCamelCase : Optional[int] = np.float32
_lowerCamelCase : Dict = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
_lowerCamelCase : str = chunk_length_s / 6
_lowerCamelCase : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_lowerCAmelCase , (int, float) ):
_lowerCamelCase : Optional[Any] = [stride_length_s, stride_length_s]
_lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_lowerCamelCase : List[str] = datetime.datetime.now()
_lowerCamelCase : Union[str, Any] = datetime.timedelta(seconds=_lowerCAmelCase )
for item in chunk_bytes_iter(_lowerCAmelCase , _lowerCAmelCase , stride=(stride_left, stride_right) , stream=_lowerCAmelCase ):
# Put everything back in numpy scale
_lowerCamelCase : Optional[int] = np.frombuffer(item["raw"] , dtype=_lowerCAmelCase )
_lowerCamelCase : Any = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_lowerCamelCase : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple[int, int] , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Any = B""
_lowerCamelCase , _lowerCamelCase : Optional[int] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
_lowerCamelCase : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(_lowerCAmelCase ) < chunk_len:
_lowerCamelCase : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_lowerCAmelCase ) >= chunk_len:
# We are flushing the accumulator
_lowerCamelCase : str = (_stride_left, stride_right)
_lowerCamelCase : Optional[Any] = {"raw": acc[:chunk_len], "stride": stride}
if stream:
_lowerCamelCase : Optional[int] = False
yield item
_lowerCamelCase : Tuple = stride_left
_lowerCamelCase : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_lowerCAmelCase ) > stride_left:
_lowerCamelCase : int = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
_lowerCamelCase : Dict = False
yield item
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int ):
"""simple docstring"""
    _lowerCamelCase : str = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(_lowerCAmelCase , stdout=subprocess.PIPE , bufsize=_lowerCAmelCase ) as ffmpeg_process:
while True:
_lowerCamelCase : str = ffmpeg_process.stdout.read(_lowerCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error | 44 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
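# A minimal sketch of the padding behaviour exercised above: LED's tokenizer
# pads `global_attention_mask` alongside `input_ids`, filling padded positions
# with -1 (the checkpoint name and texts are taken from the tests above).
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."])
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc)
print(padded["global_attention_mask"])  # the shorter row ends in -1 entries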
| 22 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
        super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def snake_case_ (UpperCamelCase : SplitDict ):
'''simple docstring'''
_a = split_dict._to_yaml_list()
assert len(UpperCamelCase ) == len(UpperCamelCase )
_a = SplitDict._from_yaml_list(UpperCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_a = None
# the split name of split_dict takes over the name of the split info object
_a = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] )
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
_a = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCAmelCase__ = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCAmelCase__ = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCAmelCase__ = field(default=2E-4 , metadata={'help': 'Learning rate fo training.'} )
lowerCAmelCase__ = field(default='cosine' , metadata={'help': 'Learning rate.'} )
lowerCAmelCase__ = field(
default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCAmelCase__ = field(
default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCAmelCase__ = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCAmelCase__ = field(
default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCAmelCase__ = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCAmelCase__ = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCAmelCase__ = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCAmelCase__ = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCAmelCase__ = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCAmelCase__ = field(
default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCAmelCase__ = field(
default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
lowerCAmelCase__ = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCAmelCase__ = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default=_a , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCAmelCase__ = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(
default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCAmelCase__ = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCAmelCase__ = field(
default=3_2_7_6_8 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCAmelCase__ = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} )
| 46 |
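# A minimal sketch of how dataclass configs like the ones above are consumed:
# `HfArgumentParser` turns each dataclass field into a CLI flag. `DemoArgs` is
# an illustrative assumption, not one of the config classes above.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    model_ckpt: str = field(default="gpt2", metadata={"help": "Model name or path."})
    batch_size: int = field(default=2, metadata={"help": "Batch size."})

parser = HfArgumentParser(DemoArgs)
(demo_args,) = parser.parse_args_into_dataclasses(["--batch_size", "4"])
print(demo_args.model_ckpt, demo_args.batch_size)  # gpt2 4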
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_a = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]:
"""simple docstring"""
_a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        _a = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
_a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
_a = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
_a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
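# A minimal sketch of the black-formatting step used by `check_copy_consistency`
# above (the target version and line length are assumptions).
import black

demo_code = "def add(  a,b ):\n    return a+b\n"
demo_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
print(black.format_str(demo_code, mode=demo_mode))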
| 22 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
SCREAMING_SNAKE_CASE__ = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict=1 , lowerCamelCase_ : Union[str, Any]=2_5_6 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def UpperCAmelCase__ ( lowerCamelCase_ : List[str] ):
with open(lowerCamelCase_ , 'r' ) as f:
return json.load(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Dict ):
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=True ):
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : Tuple = os.path.join(lowerCamelCase_ , 'tmp' )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
__a : str = read_json(os.path.join(lowerCamelCase_ , 'params.json' ) )
__a : str = NUM_SHARDS[model_size]
__a : List[str] = params['n_layers']
__a : Optional[Any] = params['n_heads']
__a : Optional[Any] = n_heads // num_shards
__a : Optional[int] = params['dim']
__a : Union[str, Any] = dim // n_heads
__a : Dict = 10000.0
__a : Tuple = 1.0 / (base ** (torch.arange(0 , lowerCamelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__a : Tuple = params['n_kv_heads'] # for GQA / MQA
__a : Union[str, Any] = n_heads_per_shard // num_key_value_heads
__a : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
__a : List[str] = n_heads
__a : List[Any] = n_heads_per_shard
__a : List[str] = dim
# permute for sliced rotary
def permute(lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=n_heads , lowerCamelCase_ : Any=dim , lowerCamelCase_ : List[str]=dim ):
return w.view(lowerCamelCase_ , dima // n_heads // 2 , 2 , lowerCamelCase_ ).transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__a : Union[str, Any] = torch.load(os.path.join(lowerCamelCase_ , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
__a : Any = [
torch.load(os.path.join(lowerCamelCase_ , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(lowerCamelCase_ )
]
__a : Any = 0
__a : List[str] = {'weight_map': {}}
for layer_i in range(lowerCamelCase_ ):
__a : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Dict = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__a : Optional[int] = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__a : List[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) )
__a : List[str] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
__a : List[Any] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(lowerCamelCase_ )
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_ )
__a : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(lowerCamelCase_ )] , dim=1 )
__a : str = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(lowerCamelCase_ )] , dim=0 )
__a : Union[str, Any] = inv_freq
for k, v in state_dict.items():
__a : Any = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
__a : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__a : Tuple = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
__a : Optional[int] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(lowerCamelCase_ )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(lowerCamelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
__a : List[Any] = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
# Write configs
__a : Any = {'total_size': param_count * 2}
write_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'pytorch_model.bin.index.json' ) )
__a : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
__a : Tuple = params['multiple_of'] if 'multiple_of' in params else 2_5_6
__a : Any = LlamaConfig(
hidden_size=lowerCamelCase_ , intermediate_size=compute_intermediate_size(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=lowerCamelCase_ , )
config.save_pretrained(lowerCamelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    __a : List[str] = LlamaForCausalLM.from_pretrained(lowerCamelCase_ , torch_dtype=torch.float16 , low_cpu_mem_usage=lowerCamelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(lowerCamelCase_ , safe_serialization=lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
# Initialize the tokenizer based on the `spm` model
__a : Dict = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
__a : Optional[Any] = tokenizer_class(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
def UpperCAmelCase__ ( ):
__a : str = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=lowerCamelCase_ , help='Whether or not to save using `safetensors`.' )
__a : List[Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__a : List[str] = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , lowerCamelCase_ )
if __name__ == "__main__":
main()
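# A minimal sketch of the rotary `permute` trick defined above: a weight of
# shape (dim, dim) is viewed per head as (pairs, 2, dim), transposed, and
# reshaped so the interleaved rotary pairs become two contiguous halves.
# Sizes are illustrative assumptions.
import torch

demo_heads, demo_dim = 2, 8
demo_w = torch.arange(demo_dim * demo_dim, dtype=torch.float32).reshape(demo_dim, demo_dim)

def demo_permute(w, n_heads=demo_heads, dim1=demo_dim, dim2=demo_dim):
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

print(demo_permute(demo_w).shape)  # torch.Size([8, 8]); rows are regrouped per head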
| 47 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Union[str, Any] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = SqueezeBertTokenizer
def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 22 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22 | 0 |
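# A minimal sketch of the transform order implemented by the `preprocess` method
# above (resize -> center_crop -> rescale -> normalize), written against the
# functional helpers in `transformers.image_transforms` that the class methods
# wrap. The sizes and normalization statistics here are illustrative
# assumptions, not the class defaults.
import numpy as np
from transformers.image_transforms import center_crop, normalize, rescale, resize

image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)     # HWC uint8
image = resize(image, size=(256, 256))                               # `do_resize` / `size`
image = center_crop(image, size=(224, 224))                          # `do_center_crop` / `crop_size`
image = rescale(image, scale=1 / 255)                                # `do_rescale` / `rescale_factor`
image = normalize(image, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # `do_normalize`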
"""simple docstring"""
import math
def lowercase__ ( snake_case_ :int ):
    if 1 < number < 4:
        # 2 and 3 are prime
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__ ( snake_case_ :float = 0.1 ):
__UpperCAmelCase = 3
__UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(snake_case_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
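# The second function above is Project Euler 58: grow a number spiral and count
# primes on its diagonals until the prime ratio drops below `ratio`. Because the
# sample masks local names, here is an unmasked, self-contained reading of the
# same algorithm; binding the two masked assignments to `j` and `primes` is an
# assumption consistent with the loop body.
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:
        return True  # 2 and 3 are prime
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):  # primes > 3 are 6k +/- 1
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def spiral_side_length(ratio: float = 0.1) -> int:
    j = 3       # current side length of the spiral
    primes = 3  # 3, 5 and 7 on the first layer are prime
    while primes / (2 * j - 1) >= ratio:  # 2j - 1 values lie on the diagonals
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)  # three new corners; the fourth is (j + 2) ** 2
        j += 2
    return j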
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 | 0 |
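# The module above defers its heavy imports with `_LazyModule`: names listed in
# `_import_structure` only resolve on first attribute access, so importing the
# package stays cheap when torch/TF/vision are missing. A reduced sketch of the
# same pattern (`configuration_my_model` and `MyConfig` are hypothetical names
# used only for illustration):
import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_my_model": ["MyConfig"]}
sys.modules[__name__] = _LazyModule(
    __name__, globals()["__file__"], _import_structure, module_spec=__spec__
)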
'''simple docstring'''
from functools import reduce
UpperCamelCase : Dict = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def A__ ( __lowerCAmelCase : str = N ):
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda __lowerCAmelCase , __lowerCAmelCase : str(int(__lowerCAmelCase ) * int(__lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(__lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 50 |
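# An explicit-loop equivalent of the reduce-based Project Euler 8 solution
# above: slide a 13-digit window over the digit string and keep the largest
# product. It reuses the module-level constant `N` named in the signature above
# (an assumption, since the sample masks the assignment); `math.prod` replaces
# the reduce/lambda pair.
from math import prod

def solution_explicit(digits: str = N, span: int = 13) -> int:
    best = 0
    for i in range(len(digits) - span + 1):
        best = max(best, prod(int(d) for d in digits[i : i + span]))
    return best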
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _a ):
lowercase_ = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_a = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(lowerCAmelCase_ )
_a = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [1_00, 87, 50, 1, 0]
_a = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
                lowerCAmelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 22 | 0 |
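# A hedged usage sketch of the API exercised by the batched test above:
# `DDPMParallelScheduler.batch_step_no_noise` denoises many (sample, timestep)
# pairs in one call by flattening them into a single batch. The shapes and the
# random stand-in for the UNet output are illustrative assumptions.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
samples = torch.randn(3, 2, 3, 8, 8)                    # (k, batch, c, h, w)
timesteps = torch.arange(1000)[0:3, None].repeat(1, 2)  # one timestep per k
model_output = torch.randn_like(samples)                # stand-in for a UNet
prev = scheduler.batch_step_no_noise(
    model_output.flatten(0, 1), timesteps.flatten(0, 1), samples.flatten(0, 1)
)
assert prev.shape == (6, 3, 8, 8)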
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_lowerCamelCase =Features({"image": Image()} )
_lowerCamelCase =Features({"labels": ClassLabel} )
_lowerCamelCase ="image"
_lowerCamelCase ="labels"
def __snake_case ( self : int , a__ : Union[str, Any] ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , a__ ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def __snake_case ( self : List[str] ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 51 |
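# A hedged sketch of how the task template above is used: `align_with_features`
# (the method defined on the class) copies the template and swaps in the
# dataset's real `ClassLabel` so that `label_schema` matches the data. The
# feature layout below is an illustrative assumption.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification(image_column="image", label_column="labels")
aligned = template.align_with_features(features)
print(aligned.label_schema)  # e.g. Features({'labels': ClassLabel(names=['cat', 'dog'])})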
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ (UpperCamelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
_a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase , UpperCamelCase )
# Predict target for test data
_a = xgb.predict(UpperCamelCase )
_a = predictions.reshape(len(UpperCamelCase ) , 1 )
return predictions
def snake_case_ ():
'''simple docstring'''
_a = fetch_california_housing()
_a , _a = data_handling(UpperCamelCase )
_a , _a , _a , _a = train_test_split(
UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
_a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Error printing
print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' )
print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
'''simple docstring'''
import qiskit
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = qiskit.Aer.get_backend('''aer_simulator''' )
_a = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
_a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Tuple = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 22 | 0 |
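# Exhaustive truth-table check of the half adder above. Classical bit 0 holds
# XOR (the sum) and bit 1 holds AND (the carry), and qiskit prints counts
# big-endian, so the dominant key should read "<carry><sum>". Uses the
# `half_adder` name from the sample's __main__ block.
for bit_a in (0, 1):
    for bit_b in (0, 1):
        counts = half_adder(bit_a, bit_b)
        expected = f"{bit_a & bit_b}{bit_a ^ bit_b}"
        assert max(counts, key=counts.get) == expected, (bit_a, bit_b, counts)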
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str=1_3 , lowerCAmelCase_ : Union[str, Any]=3_2 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Union[str, Any]=1_6 , lowerCAmelCase_ : Any=[1, 2, 1] , lowerCAmelCase_ : Optional[Any]=[2, 2, 4] , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : str=2.0 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : int=1e-5 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=1_0 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Optional[Any]=["stage1", "stage2", "stage3"] , lowerCAmelCase_ : Dict=[1, 2, 3] , ) -> Optional[int]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = patch_norm
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = is_training
__lowerCAmelCase = scope
__lowerCAmelCase = use_labels
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = encoder_stride
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Dict ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
__lowerCAmelCase = MaskFormerSwinModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
__lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
__lowerCAmelCase = MaskFormerSwinBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = ['stem']
__lowerCAmelCase = MaskFormerSwinBackbone(config=lowerCAmelCase_ )
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a_ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = MaskFormerSwinModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase ( self : Optional[Any] ) -> List[Any]:
pass
def lowercase ( self : Any ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ) -> List[str]:
return
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase ( self : Optional[int] ) -> List[Any]:
pass
def lowercase ( self : Any ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase ( self : Dict ) -> Any:
pass
def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ) -> Union[str, Any]:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# Swin has a different seq_length
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase ( self : int ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        # check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
        # check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase ( self : Tuple ) -> List[str]:
pass
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCAmelCase_ : Dict ):
__lowerCAmelCase = 0
return t
def check_equivalence(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any={} ):
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = model(**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple()
def recursive_check(lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ):
if isinstance(lowerCAmelCase_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCAmelCase_ ) , set_nan_tensor_to_zero(lowerCAmelCase_ ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(lowerCAmelCase_ ).any()} and `inf`: {torch.isinf(lowerCAmelCase_ )}. Dict has"""
f""" `nan`: {torch.isnan(lowerCAmelCase_ ).any()} and `inf`: {torch.isinf(lowerCAmelCase_ )}."""
) , )
recursive_check(lowerCAmelCase_ , lowerCAmelCase_ )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {'output_hidden_states': True} )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {'output_hidden_states': True} )
@require_torch
class _UpperCAmelCase ( unittest.TestCase , _UpperCamelCase ):
"""simple docstring"""
a_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a_ = MaskFormerSwinConfig
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = MaskFormerSwinModelTester(self )
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCAmelCase = backbone_class(lowerCAmelCase_ )
backbone.to(lowerCAmelCase_ )
backbone.eval()
__lowerCAmelCase = backbone(**lowerCAmelCase_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowerCAmelCase_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCAmelCase = backbone(**lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCAmelCase = backbone(**lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
self.assertIsNotNone(outputs.attentions )
| 53 |
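# A hedged sketch of the backbone usage the tests above verify: configure which
# stages to expose via `out_features`, run a forward pass, and read multi-scale
# feature maps. The input resolution is an illustrative assumption.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(feature_map.shape))  # one (1, C, H, W) map per stage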
'''simple docstring'''
from collections.abc import Generator
from math import sin
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''08x''' )[-8:]
_a = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = B''''''
for char in message:
bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
if len(UpperCamelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCamelCase ) , 512 ):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase , 2 )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
return (a + b) % 2**32
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case_ (UpperCamelCase : bytes ):
'''simple docstring'''
_a = preprocess(UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67452301
_a = 0Xefcdab89
_a = 0X98badcfe
_a = 0X10325476
    _a = [  # per-round left-rotate amounts: four rounds of sixteen steps
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = sum_aa(UpperCamelCase , UpperCamelCase )
_a = reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
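# Cross-check of the pure-Python MD5 above against the standard library. Every
# helper in the sample shares one masked name, so binding the final digest
# routine to `md5_me` (and its bytes return type, per `reformat_hex`) is an
# assumption.
import hashlib

for message in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")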
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A ( __lowercase ):
_snake_case =['''image_processor''', '''tokenizer''']
_snake_case ='''Pix2StructImageProcessor'''
_snake_case =('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self: Optional[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self: Tuple , _lowerCAmelCase: str=None , _lowerCAmelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase: Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[int] = 2048 , _lowerCAmelCase: int = 0 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[bool] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , **_lowerCAmelCase: int , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase_ =self.tokenizer
UpperCAmelCase_ =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase_ =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , **_lowerCAmelCase )
else:
# add pixel_values and bbox
UpperCAmelCase_ =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , header_text=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase_ =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
if "attention_mask" in text_encoding:
UpperCAmelCase_ =text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
UpperCAmelCase_ =text_encoding.pop("input_ids" )
else:
UpperCAmelCase_ =None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def lowerCAmelCase__ ( self: Optional[int] , *_lowerCAmelCase: Any , **_lowerCAmelCase: int ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , *_lowerCAmelCase: List[str] , **_lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer.model_input_names
UpperCAmelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 54 |
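# A hedged usage sketch of the processor above for a non-VQA Pix2Struct
# checkpoint: images become flattened patches (capped by `max_patches`) and the
# text goes through the tokenizer branch as decoder-side ids. The checkpoint
# name and blank image are illustrative assumptions.
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.new("RGB", (640, 480), color="white")
inputs = processor(images=image, text="A short caption", return_tensors="pt", max_patches=1024)
print(sorted(inputs.keys()))  # flattened_patches, attention_mask, decoder ids/mask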
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]:
"""simple docstring"""
_a = size if size is not None else {'''height''': 18, '''width''': 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( _a ,unittest.TestCase ):
lowercase_ = ImageGPTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = ImageGPTImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ():
'''simple docstring'''
_a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
_a = Image.open(dataset[4]['''file'''] )
_a = Image.open(dataset[5]['''file'''] )
_a = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_a = prepare_images()
# test non-batched
_a = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
_a = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ )
# test batched
_a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
_a = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
| 22 | 0 |
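# The core quantization behind the `input_ids` checked above: after resizing
# and normalizing to [-1, 1], ImageGPT replaces each pixel (an RGB 3-vector)
# with the index of its nearest color cluster. A minimal numpy sketch with two
# illustrative clusters standing in for the model's trained palette:
import numpy as np

clusters = np.asarray([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])  # (n_clusters, 3)
pixels = np.random.uniform(-1, 1, size=(64, 3))               # flattened image
distances = np.linalg.norm(pixels[:, None, :] - clusters[None, :, :], axis=-1)
input_ids = distances.argmin(axis=-1)                         # (64,) cluster ids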
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[int] = 'Hello, World!'
SCREAMING_SNAKE_CASE :Optional[Any] = 'en_XX'
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
__A = Path("data_bin" )
__A = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_ ).parent ) , checkpoint_file=Path(a_ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(a_ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(a_ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(a_ )
__A = xmod.model.encoder.sentence_encoder
__A = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__A = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , a_ )
__A = XmodForSequenceClassification(a_ ) if classification_head else XmodForMaskedLM(a_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__A = xmod_sent_encoder.embed_tokens.weight
__A = xmod_sent_encoder.embed_positions.weight
__A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__A = xmod_sent_encoder.layernorm_embedding.weight
__A = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__A = model.roberta.encoder.layer[i]
__A = xmod_sent_encoder.layers[i]
# self attention
__A = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__A = xmod_layer.self_attn.q_proj.weight
__A = xmod_layer.self_attn.q_proj.bias
__A = xmod_layer.self_attn.k_proj.weight
__A = xmod_layer.self_attn.k_proj.bias
__A = xmod_layer.self_attn.v_proj.weight
__A = xmod_layer.self_attn.v_proj.bias
# self-attention output
__A = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__A = xmod_layer.self_attn.out_proj.weight
__A = xmod_layer.self_attn.out_proj.bias
__A = xmod_layer.self_attn_layer_norm.weight
__A = xmod_layer.self_attn_layer_norm.bias
# intermediate
__A = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__A = xmod_layer.fca.weight
__A = xmod_layer.fca.bias
# output
__A = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__A = xmod_layer.fca.weight
__A = xmod_layer.fca.bias
__A = xmod_layer.final_layer_norm.weight
__A = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__A = xmod_layer.adapter_layer_norm.weight
__A = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__A = bert_output.adapter_modules[lang_code]
__A = xmod_layer.adapter_modules[lang_code]
__A = from_adapter.fca.weight
__A = from_adapter.fca.bias
__A = from_adapter.fca.weight
__A = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__A = xmod_sent_encoder.layer_norm.weight
__A = xmod_sent_encoder.layer_norm.bias
if classification_head:
__A = xmod.model.classification_heads["mnli"].dense.weight
__A = xmod.model.classification_heads["mnli"].dense.bias
__A = xmod.model.classification_heads["mnli"].out_proj.weight
__A = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__A = xmod.model.encoder.lm_head.dense.weight
__A = xmod.model.encoder.lm_head.dense.bias
__A = xmod.model.encoder.lm_head.layer_norm.weight
__A = xmod.model.encoder.lm_head.layer_norm.bias
__A = xmod.model.encoder.lm_head.weight
__A = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__A = xmod.encode(a_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(a_ )
__A = model(a_ )[0]
if classification_head:
__A = xmod.model.classification_heads["mnli"](xmod.extract_features(a_ ) )
else:
__A = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__A = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
__A = torch.allclose(a_ , a_ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(a_ ).mkdir(parents=a_ , exist_ok=a_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
SCREAMING_SNAKE_CASE :str = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 55 |
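# How the conversion script above is typically invoked (a sketch: the script
# filename and checkpoint path are illustrative assumptions, while the three
# flags are exactly the ones declared by the argparse block):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head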
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_a = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_a = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_a = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits
_a = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean()
_a = -(labels.shape[-1] * loss.item())
_a = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 22 | 0 |
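# The scoring recipe used by the MT5 test above, spelled out with unmasked
# names: teacher-force the decoder with right-shifted labels, take token-level
# softmax cross entropy against one-hot labels, and rescale the mean loss by
# the sequence length to recover a summed log-likelihood.
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right

model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(
    labels, model.config.pad_token_id, model.config.decoder_start_token_id
)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
score = -(labels.shape[-1] * loss.item())  # summed negative cross entropy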
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase__ : int , lowercase__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case = update_area_of_max_square(lowercase__ , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
return sub_problem_sol
else:
return 0
__snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case = update_area_of_max_square_using_dp_array(lowercase__ , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , lowercase__ , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
__snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case = [0]
__snake_case = [[-1] * cols for _ in range(lowercase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase__ )
return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only two rows, so the extra space is O(cols)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy (not alias) the finished row, otherwise the diagonal lookups above
        # would read values from the row currently being written
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
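# Hedged consistency check (uses only the functions defined above): all four
# implementations should agree on the same input matrix.
if __name__ == "__main__":
    sample_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    results = {
        largest_square_area_in_matrix_top_down(3, 3, sample_mat),
        largest_square_area_in_matrix_top_down_with_dp(3, 3, sample_mat),
        largest_square_area_in_matrix_bottom_up(3, 3, sample_mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample_mat),
    }
    assert results == {2}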
| 56 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns the analog-bit tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects an analog-bit tensor in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=bits)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
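# Hedged round-trip sanity check for the two helpers above: quantizing an image to
# 8 bits and decoding it back should agree up to the 1/255 quantization error.
if __name__ == "__main__":
    _img = torch.rand(2, 3, 4, 4)
    _recon = bits_to_decimal(decimal_to_bits(_img))
    assert torch.allclose(_img, _recon, atol=1 / 255)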
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips the predicted x_0 to +/- bit_scale instead of +/- 1."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation:
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t-1 without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips the predicted x_0 to +/- bit_scale instead of +/- 1."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the matching bit-aware step function as a method on the scheduler instance.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
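# Hedged usage sketch (the checkpoint path is a placeholder, not a real repository):
# the pipeline expects a UNet trained on the analog-bit representation produced by
# decimal_to_bits above.
#
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   images = pipe(height=256, width=256, num_inference_steps=50).images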
| 22 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        return TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
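# Hedged usage sketch: inspect the dynamic ONNX axes exposed by the config classes
# above (the "default" task string is the standard OnnxConfig default).
if __name__ == "__main__":
    config = RoFormerConfig()
    onnx_config = RoFormerOnnxConfig(config, task="default")
    print(dict(onnx_config.inputs))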
| 22 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution induced by logits x (one row per example)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
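# Quick sanity check for entropy() above (hedged illustration): uniform logits of
# width n give entropy log(n).
if __name__ == "__main__":
    import math

    assert torch.allclose(entropy(torch.zeros(1, 4)), torch.tensor([math.log(4.0)]))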
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # a scalar sets the same threshold for every layer; a list sets per-layer thresholds
        if isinstance(x, (float, int)):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # exit early when the highway classifier is confident enough
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: a BertPooler plus a classifier on top of one intermediate layer."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
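# Hedged usage sketch (the checkpoint path and the 0.4 threshold are illustrative
# assumptions, not part of this file): after fine-tuning, a uniform entropy threshold
# lets confident examples exit before the final layer at inference time.
#
#   model = DeeBertForSequenceClassification.from_pretrained("path/to/finetuned-deebert")
#   model.bert.encoder.set_early_exit_entropy(0.4)
#   outputs = model(input_ids)  # may raise an internal HighwayException and exit early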
| 58 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list used for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: zero-weight edges go to the front of the deque, unit-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
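# Hedged usage example for the classes above: in this 4-vertex graph the chain of
# 0-weight edges beats the direct 1-weight edge, so the shortest path has cost 0.
if __name__ == "__main__":
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(0, 3, 1)
    assert g.get_shortest_path(0, 3) == 0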
| 22 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (2022), for variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # KarrasVe does not rescale the model input
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": add noise to the sample, moving sigma up to sigma_hat."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) prediction of the sample at the previous sigma."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order correction to the predicted sample, averaging the two derivatives."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
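# Hedged usage sketch (not part of the scheduler file): a minimal sampling loop in
# the style of diffusers' KarrasVePipeline. `dummy_model` is a stand-in that simply
# predicts zeros, so this only exercises the scheduler API, not a real denoiser.
if __name__ == "__main__":
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

    def dummy_model(x, sigma):
        return torch.zeros_like(x)

    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = dummy_model(sample_hat, sigma_hat)
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample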
| 59 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via the recurrence
    gamma(n) = (n - 1) * gamma(n - 1), with gamma(1) = 1 and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """
    >>> test_gamma()
    """
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
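# Hedged cross-check against the standard library: math.gamma agrees with the
# implementation above on the integers and half-integers it supports.
def test_gamma_against_math_gamma() -> None:
    """
    >>> test_gamma_against_math_gamma()
    """
    from math import gamma as math_gamma

    assert abs(gamma(4) - math_gamma(4)) < 1e-9
    assert abs(gamma(3.5) - math_gamma(3.5)) < 1e-9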
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 22 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 60 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 22 | 0 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
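# Hedged sanity-check example of camel_case_split on a typical class name:
# "TFPegasusModel" splits into the framework prefix and the two words of the name.
assert camel_case_split("TFPegasusModel") == ["TF", "Pegasus", "Model"]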
def get_frameworks_table():
    """Build a dataframe of all model types with the frameworks (PT/TF/Flax) they support."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def _A ( lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCAmelCase__ = [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
        lowerCAmelCase__ = [auto_class, F'TF{auto_class}', F'Flax{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
# The type of pipeline may not exist in this framework
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
continue
# First extract all model_names
lowerCAmelCase__ = []
for name in getattr(lowerCAmelCase_ , lowerCAmelCase_ ).values():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
model_names.append(lowerCAmelCase_ )
else:
model_names.extend(list(lowerCAmelCase_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = get_frameworks_table()
lowerCAmelCase__ = Dataset.from_pandas(lowerCAmelCase_ )
lowerCAmelCase__ = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=lowerCAmelCase_ )
lowerCAmelCase__ = Dataset.from_json(lowerCAmelCase_ )
lowerCAmelCase__ = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(lowerCAmelCase_ ) )
}
lowerCAmelCase__ = update_pipeline_and_auto_class_table(lowerCAmelCase_ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCAmelCase__ = sorted(table.keys() )
lowerCAmelCase__ = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
lowerCAmelCase__ = Dataset.from_pandas(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCAmelCase_ , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(lowerCAmelCase_ , "pipeline_tags.json" ) )
if commit_sha is not None:
lowerCAmelCase__ = (
F'Update with commit {commit_sha}\n\nSee: '
F'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
lowerCAmelCase__ = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=lowerCAmelCase_ , repo_type="dataset" , token=lowerCAmelCase_ , commit_message=lowerCAmelCase_ , )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCAmelCase__ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCAmelCase__ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCAmelCase__ = pipeline_tasks[key]["pt"]
if isinstance(lowerCAmelCase_ , (list, tuple) ):
lowerCAmelCase__ = model[0]
lowerCAmelCase__ = model.__name__
if model not in in_table.values():
missing.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase__ = ", ".join(lowerCAmelCase_ )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
F'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
UpperCamelCase = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 61 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415},\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_ (UpperCamelCase : Tuple ):
'''simple docstring'''
def remove_articles(UpperCamelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : str ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )
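# Quick worked example of the normalization chain above (lowercase, strip
# punctuation, drop articles, collapse whitespace):
#   normalize_answer("The Quick, Brown  fox!")  # -> "quick brown fox"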
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
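# Worked example for the exact-match average: if only the first of two
# predictions matches one of its references after normalization,
#   predictions = ["The cat", "a dog"], references = [["the cat!"], ["the bird"]]
# the result is (1 + 0) / 2 * 100 = 50.0.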
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(UpperCamelCase )
_a = Counter(UpperCamelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(UpperCamelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
        _a = keeptmpscore1 / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        _a = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(UpperCamelCase ) > 0:
        _a = deltmpscore1 / len(UpperCamelCase )
# ADDITION
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = set(UpperCamelCase ) & set(UpperCamelCase )
_a = set(UpperCamelCase ) - set(UpperCamelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
_a = addtmpscore / len(UpperCamelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
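# The tuple returned above follows the SARI definition: KEEP and ADD are
# F1 scores, F1 = 2 * P * R / (P + R), over the n-gram precision/recall
# computed here, while DELETE contributes its precision only.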
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(UpperCamelCase )
    (keep1score, del1score, add1score) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    (keep2score, del2score, add2score) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    (keep3score, del3score, add3score) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    (keep4score, del4score, add4score) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    _a = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    _a = sum([del1score, del2score, del3score, del4score] ) / 4
    _a = sum([add1score, add2score, add3score, add4score] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
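# Minimal usage sketch (output shown is an assumption about sacrebleu's "13a"
# tokenizer, which splits off punctuation that is not part of a number):
#   normalize("Hello, world.", tokenizer="13a")  # -> "hello , world ."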
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
_a = sari_score / len(UpperCamelCase )
return 100 * sari_score
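# This matches the module docstring example above: for
#   sources=["About 95 species are currently accepted ."]
#   predictions=["About 95 you now get in ."]
#   references=[["About 95 species are currently known ."]]
# the corpus score is 100 * sari_score / 1 ~= 21.81.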
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
'''simple docstring'''
_a = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
_a = sacrebleu.corpus_bleu(
UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = {}
result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
return result
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 62 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_snake_case : Tuple = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_snake_case : Any = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = numpy_to_pil(UpperCamelCase )
return images
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
if images.ndim == 3:
_a = images[None, ...]
_a = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
_a = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
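# Minimal usage sketch: the first helper takes a diffusion output tensor in
# [-1, 1] of shape (batch, channels, height, width), rescales it to [0, 1],
# moves channels last, and hands the numpy array to numpy_to_pil, which
# returns a list of PIL.Image objects (one per batch element).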
| 22 | 0 |
import functools
def lowerCamelCase__ ( __lowerCamelCase : list[int] , __lowerCamelCase : list[int] ):
# Validation
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for day in days ):
raise ValueError("""The parameter days should be a list of integers""" )
if len(__lowerCamelCase ) != 3 or not all(isinstance(__lowerCamelCase , __lowerCamelCase ) for cost in costs ):
raise ValueError("""The parameter costs should be a list of three integers""" )
if len(__lowerCamelCase ) == 0:
return 0
if min(__lowerCamelCase ) <= 0:
raise ValueError("""All days elements should be greater than 0""" )
if max(__lowerCamelCase ) >= 366:
raise ValueError("""All days elements should be less than 366""" )
__UpperCAmelCase : List[str] = set(__lowerCamelCase )
@functools.cache
def dynamic_programming(__lowerCamelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
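# Worked example for the ticket-cost recursion above: with
#   days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15]
# a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on
# day 20 give the optimum 2 + 7 + 2 = 11.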
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import requests
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = {'''Content-Type''': '''application/json'''}
_a = requests.post(UpperCamelCase , json={'''text''': message_body} , headers=UpperCamelCase )
if response.status_code != 200:
_a = (
'''Request to slack returned an error '''
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__: List[str]= [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: str= {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__: int= sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE__: List[Any]= q
SCREAMING_SNAKE_CASE__: Any= k
SCREAMING_SNAKE_CASE__: Optional[Any]= v
del sd[key]
return sd
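# Shape sketch for the QKV split above (a hidden size of 768 is illustrative):
# a fused ".qkv_proj." weight of shape (3 * 768, 768) is split along dim 0
# into three (768, 768) tensors that become the separate q/k/v projections.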
@torch.no_grad()
def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple=None ):
SCREAMING_SNAKE_CASE__: List[str]= load_checkpoint(snake_case_ )
if config is not None:
SCREAMING_SNAKE_CASE__: Any= OPTConfig.from_pretrained(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= OPTConfig()
SCREAMING_SNAKE_CASE__: Union[str, Any]= OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowercase_ : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 22 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__UpperCamelCase , n - 1 , __UpperCamelCase ) * a) % mod
else:
        UpperCAmelCase__ : Union[str, Any] = binary_exponentiation(__UpperCamelCase , n // 2 , __UpperCamelCase )
return (b * b) % mod
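# Worked example: 3**10 % 7. The exponent halves as 10 -> 5 -> 4 -> 2 -> 1 -> 0,
# and 59049 % 7 == 4, so binary_exponentiation(3, 10, 7) returns 4.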
# a prime number
__UpperCAmelCase = 701
__UpperCAmelCase = 10_0000_0000
__UpperCAmelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
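# Why both checks hold: by Fermat's little theorem, b**(p - 2) % p is the
# modular inverse of b for prime p, so each line compares plain division
# (exact here, since b divides a) against modular division.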
| 65 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
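# Illustrative call (file names and the label column index are hypothetical;
# this mirrors the invocation inside main() below):
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file=None,
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128)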
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(UpperCamelCase )
return results
if __name__ == "__main__":
main()
| 22 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : Optional[Any] = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[str] = num_channels
_lowercase : Tuple = image_size
_lowercase : str = patch_size
_lowercase : Union[str, Any] = text_seq_length
_lowercase : List[str] = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : Dict = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Optional[int] = vocab_size
_lowercase : int = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : List[Any] = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = coordinate_size
_lowercase : Tuple = shape_size
_lowercase : Any = num_labels
_lowercase : List[str] = num_choices
_lowercase : Optional[Any] = scope
_lowercase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase : Optional[Any] = text_seq_length
_lowercase : Dict = (image_size // patch_size) ** 2 + 1
_lowercase : Union[str, Any] = self.text_seq_length + self.image_seq_length
def __a ( self ):
_lowercase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : Optional[int] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Any = bbox[i, j, 0]
_lowercase : int = t
_lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : int = None
if self.use_input_mask:
_lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowercase : List[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = LayoutLMvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# text + image
_lowercase : str = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase )
_lowercase : Optional[Any] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowercase : str = model(pixel_values=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : str = LayoutLMvaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Any = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[Any] = self.num_labels
_lowercase : Optional[Any] = LayoutLMvaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Tuple = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Union[str, Any] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_lowercase : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Any = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __a ( self ):
_lowercase : List[Any] = LayoutLMvaModelTester(self )
_lowercase : Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : Optional[int] = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_lowerCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Tuple = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in get_values(_lowerCAmelCase ):
_lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
_lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
_lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
_lowercase : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCAmelCase , )
return inputs_dict
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Union[str, Any] = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Tuple = LayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __magic_name__ ( ) -> Any:
_lowercase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def __a ( self ):
_lowercase : Tuple = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(_lowerCAmelCase )
_lowercase : List[Any] = self.default_image_processor
_lowercase : List[str] = prepare_img()
_lowercase : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).pixel_values.to(_lowerCAmelCase )
_lowercase : Tuple = torch.tensor([[1, 2]] )
_lowercase : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowercase : int = model(
input_ids=input_ids.to(_lowerCAmelCase ) , bbox=bbox.to(_lowerCAmelCase ) , pixel_values=pixel_values.to(_lowerCAmelCase ) , )
# verify the logits
_lowercase : Union[str, Any] = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
_lowercase : int = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 66 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = LEDTokenizer
lowercase_ = LEDTokenizerFast
lowercase_ = True
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().setUp()
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 22 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should"
            f" be {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 67 |
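The conversion above is driven by the MAPPING table: each fairseq parameter name is rewritten to its transformers counterpart before the tensor is copied into the model. A minimal, self-contained sketch of that renaming step on a toy state dict (the keys and values below are illustrative, not from a real checkpoint):

# toy illustration of the MAPPING-style key rewrite performed by load_wav2vec2_layer
toy_mapping = {"post_extract_proj": "feature_projection.projection"}
toy_state = {"post_extract_proj.weight": "w-tensor", "post_extract_proj.bias": "b-tensor"}

renamed = {}
for name, tensor in toy_state.items():
    for key, mapped_key in toy_mapping.items():
        if key in name:
            # keep the ".weight"/".bias" suffix, swap the fairseq prefix
            renamed[name.replace(key, "wav2vec2." + mapped_key)] = tensor

print(renamed)
# {'wav2vec2.feature_projection.projection.weight': 'w-tensor', ...}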
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, asdict() must keep the deprecated "dataset_name" field
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 | 0 |
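For reference, the round trip exercised by the first test looks like this in isolation; `_to_yaml_list` and `_from_yaml_list` are private helpers of `datasets`, so treat this as a sketch tied to the version under test:

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = splits._to_yaml_list()  # list of plain dicts, without the deprecated dataset_name
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42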
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 68 |
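A short usage sketch for the config above; the assertion about the serialized backbone assumes the released `transformers` behavior, where `to_dict()` records each config's `model_type`:

from transformers import DPTConfig

config = DPTConfig(is_hybrid=False)  # plain ViT-style DPT, no backbone config
hybrid = DPTConfig(is_hybrid=True)   # a default BiT backbone config is filled in

as_dict = hybrid.to_dict()
assert as_dict["backbone_config"]["model_type"] == "bit"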
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_a = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]:
"""simple docstring"""
_a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
_a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ )
_a = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCAmelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
# Copy consistency with a really long name
_a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
| 22 | 0 |
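The checker being tested boils down to: find a `# Copied from ... with A->B` marker, fetch the reference code, apply the rename, and diff. A stripped-down sketch of just the rename-and-compare step (the real checker also runs black and handles indentation):

import re

reference = "class DDPMSchedulerOutput:\n    prev_sample = None\n"
old, new = "DDPM->Test".split("->")
expected = re.sub(old, new, reference)

observed = "class TestSchedulerOutput:\n    prev_sample = None\n"
assert observed == expected  # the copy is consistent with the reference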
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Dict ):
"""simple docstring"""
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Adds the given node to the end of the list (before rear)
        """
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """
        Removes and returns the given node from the list

        Returns None if node.prev or node.next is None
        """
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable that maps decorated functions to their cache instance
    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict = {}
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : Optional[int] , a_ : T ):
"""simple docstring"""
return key in self.cache
    def get(self, key: T) -> U | None:
        """
        Returns the value for the key and refreshes it in the list; None on a miss
        """
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        """
        Sets the value for the key in the cache, evicting the oldest entry if full
        """
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
@classmethod
    def decorator(cls, size: int = 128):
        """
        Decorator version of LRU Cache
        """

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
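Typical use of the decorator defined above, with the cache shared across all calls to the decorated function (this mirrors the doctests in the upstream version of this file):

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)  # recursive calls also go through the cache

print(fib(10))           # 55
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)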
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 22 | 0 |
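The two helpers above implement the BERT-style special-token layout, `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair. A toy illustration with made-up ids (cls=101, sep=102):

cls, sep = [101], [102]
a, b = [7, 8], [9]

input_ids = cls + a + sep + b + sep                              # [101, 7, 8, 102, 9, 102]
token_type_ids = [0] * len(cls + a + sep) + [1] * len(b + sep)   # first segment 0s, second 1s
assert len(input_ids) == len(token_type_ids)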
def solution(limit: int = 50_00_00_00) -> int:
    """
    Count the integers below limit expressible as the sum of a prime square,
    a prime cube, and a prime fourth power (Project Euler 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
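Per the Project Euler 87 statement, 28 = 2^2 + 2^3 + 2^4 is the smallest such number and exactly four exist below fifty, which gives a quick sanity check for the function above:

assert solution(50) == 4  # 28, 33, 47, 49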
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 22 | 0 |
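The preprocess method above applies, in order: resize, center crop, rescale, normalize. A numpy-only sketch of the last two steps on a dummy image, using the ImageNet default statistics referenced above, just to make the data flow concrete:

import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)

rescaled = image * (1 / 255)          # rescale step: bring pixels into [0, 1]
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
normalized = (rescaled - mean) / std  # normalize step: broadcast over the channel axis
print(normalized.shape)               # (224, 224, 3)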
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def a__ ( _SCREAMING_SNAKE_CASE : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[str]:
"""simple docstring"""
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , _SCREAMING_SNAKE_CASE , )
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCAmelCase_ : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = image[0].size
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
UpperCAmelCase_ : int = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
UpperCAmelCase_ : Any = np.concatenate(_SCREAMING_SNAKE_CASE , axis=0 )
UpperCAmelCase_ : Dict = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
UpperCAmelCase_ : Any = image.transpose(0 , 3 , 1 , 2 )
UpperCAmelCase_ : List[str] = 2.0 * image - 1.0
UpperCAmelCase_ : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
elif isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : List[str] = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
return image
def a__ ( _SCREAMING_SNAKE_CASE : Union[List, PIL.Image.Image, torch.Tensor] ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return mask
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCAmelCase_ : Optional[int] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = mask[0].size
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase_ : Tuple = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
UpperCAmelCase_ : Union[str, Any] = np.concatenate(_SCREAMING_SNAKE_CASE , axis=0 )
UpperCAmelCase_ : List[str] = mask.astype(np.floataa ) / 255.0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE )
elif isinstance(mask[0] , torch.Tensor ):
UpperCAmelCase_ : Tuple = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
def __init__( self ,_snake_case ,_snake_case ):
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self ,_snake_case ,_snake_case ,_snake_case = 2_50 ,_snake_case = 0.0 ,_snake_case = 10 ,_snake_case = 10 ,_snake_case = None ,_snake_case = "pil" ,_snake_case = True ,):
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Tuple = _preprocess_image(_snake_case )
UpperCAmelCase_ : Union[str, Any] = original_image.to(device=self.device ,dtype=self.unet.dtype )
UpperCAmelCase_ : List[str] = _preprocess_mask(_snake_case )
UpperCAmelCase_ : Dict = mask_image.to(device=self.device ,dtype=self.unet.dtype )
UpperCAmelCase_ : Dict = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_snake_case ,_snake_case ) and len(_snake_case ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_snake_case )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCAmelCase_ : List[Any] = original_image.shape
UpperCAmelCase_ : Optional[Any] = randn_tensor(_snake_case ,generator=_snake_case ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_snake_case ,_snake_case ,_snake_case ,self.device )
UpperCAmelCase_ : List[str] = eta
UpperCAmelCase_ : Optional[int] = self.scheduler.timesteps[0] + 1
UpperCAmelCase_ : str = generator[0] if isinstance(_snake_case ,_snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCAmelCase_ : int = self.unet(_snake_case ,_snake_case ).sample
# compute previous image: x_t -> x_t-1
UpperCAmelCase_ : int = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCAmelCase_ : Optional[int] = self.scheduler.undo_step(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = t
UpperCAmelCase_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase_ : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[int] = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 71 |
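A hedged usage sketch for the pipeline above; the checkpoint name follows the diffusers RePaint example and downloading weights is required, so the calls are left commented:

# import torch
# from diffusers import RePaintPipeline, RePaintScheduler
#
# scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
# out = pipe(image=original_image, mask_image=mask, num_inference_steps=250,
#            eta=0.0, jump_length=10, jump_n_sample=10).images[0]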
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 22 | 0 |
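The `_LazyModule` indirection above defers heavy imports until first attribute access. A minimal standalone sketch of the same idea using module-level `__getattr__` (PEP 562), written as if it were its own module file; `_import_structure` here maps a module name to the names it exports:

import importlib

_import_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            # import the submodule only now, on first access
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")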
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortized monthly payment:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r the monthly interest rate and n the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
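A worked example of the formula above: 100,000 borrowed at 10% per annum over two years amortizes to a monthly payment of approximately 4,614.49 (value computed from the formula itself, rounded):

emi = equated_monthly_installments(100_000, 0.10, 2)
print(round(emi, 2))  # ~4614.49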
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = self.dummy_sample_deter + 0.1
_a = self.dummy_sample_deter - 0.1
_a = samplea.shape[0]
_a = torch.stack([samplea, samplea, samplea] , dim=0 )
_a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**lowerCAmelCase_ )
_a = len(lowerCAmelCase_ )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_a = pred_prev_sample
_a = torch.sum(torch.abs(lowerCAmelCase_ ) )
_a = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
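# A minimal usage sketch of the custom-timesteps API exercised by the tests
# above (an illustration, assuming a diffusers version whose DDPMScheduler
# accepts `set_timesteps(timesteps=...)`; the schedule values are arbitrary):
#
#     from diffusers import DDPMScheduler
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # strictly descending
#     for t in scheduler.timesteps:
#         prev_t = scheduler.previous_timestep(t)  # -1 after the final step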
| 22 | 0 |
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: best value using the first i items
    with remaining capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp
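# Worked example for the iterative solver above (a sketch; these are the same
# weights/values used in the __main__ demo at the bottom of this file):
#
#     best_value, _ = knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4], 4)
#     assert best_value == 8  # take items 3 and 4 (1-indexed): weight 2 + 3 <= 6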
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack problem and also reconstruct one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
a_ : int = [3, 2, 4, 4]
a_ : Union[str, Any] = [4, 3, 2, 3]
a_ : Tuple = 4
a_ : Optional[int] = 6
a_ : Dict = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
a_ , a_ : str = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
a_ , a_ : Union[str, Any] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 73 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
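    # A possible follow-up metric (a sketch, not part of the original script):
    # RMSE is in the same units as the target, so it is often easier to read.
    #
    #     import numpy as np
    #     rmse = np.sqrt(mean_squared_error(y_test, predictions))  # inside main()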
| 22 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''char'''
lowerCAmelCase_ = '''bpe'''
lowerCAmelCase_ = '''wp'''
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ['''image_processor''', '''char_tokenizer''']
lowerCAmelCase_ = '''ViTImageProcessor'''
lowerCAmelCase_ = '''MgpstrTokenizer'''
def __init__( self : int , _A : str=None , _A : List[str]=None , **_A : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _A , )
__SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''feature_extractor''' )
__SCREAMING_SNAKE_CASE : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''gpt2''' )
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(_A , _A )
def __call__( self : int , _A : str=None , _A : List[Any]=None , _A : List[Any]=None , **_A : str ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None:
__SCREAMING_SNAKE_CASE : Any = self.char_tokenizer(_A , return_tensors=_A , **_A )
if text is None:
return inputs
elif images is None:
return encodings
else:
__SCREAMING_SNAKE_CASE : str = encodings['''input_ids''']
return inputs
def UpperCAmelCase__ ( self : List[Any] , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = sequences
__SCREAMING_SNAKE_CASE : Tuple = char_preds.size(0 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self._decode_helper(_A , '''char''' )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._decode_helper(_A , '''bpe''' )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._decode_helper(_A , '''wp''' )
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(_A ):
__SCREAMING_SNAKE_CASE : Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
__SCREAMING_SNAKE_CASE : str = [char_strs[i], bpe_strs[i], wp_strs[i]]
__SCREAMING_SNAKE_CASE : Any = scores.index(max(_A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__SCREAMING_SNAKE_CASE : str = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = final_strs
__SCREAMING_SNAKE_CASE : Union[str, Any] = final_scores
__SCREAMING_SNAKE_CASE : int = char_strs
__SCREAMING_SNAKE_CASE : List[Any] = bpe_strs
__SCREAMING_SNAKE_CASE : List[str] = wp_strs
return out
def UpperCAmelCase__ ( self : Any , _A : Optional[int] , _A : List[Any] ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
__SCREAMING_SNAKE_CASE : Any = self.char_decode
__SCREAMING_SNAKE_CASE : Any = 1
__SCREAMING_SNAKE_CASE : Dict = '''[s]'''
elif format == DecodeType.BPE:
__SCREAMING_SNAKE_CASE : Dict = self.bpe_decode
__SCREAMING_SNAKE_CASE : Dict = 2
__SCREAMING_SNAKE_CASE : Optional[int] = '''#'''
elif format == DecodeType.WORDPIECE:
__SCREAMING_SNAKE_CASE : str = self.wp_decode
__SCREAMING_SNAKE_CASE : Any = 102
__SCREAMING_SNAKE_CASE : Optional[int] = '''[SEP]'''
else:
raise ValueError(F'''Format {format} is not supported.''' )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = [], []
__SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
__SCREAMING_SNAKE_CASE : Tuple = pred_logits.size(1 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=_A , sorted=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = preds_index.view(-1 , _A )[:, 1:]
__SCREAMING_SNAKE_CASE : Dict = decoder(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = torch.nn.functional.softmax(_A , dim=2 ).max(dim=2 )
__SCREAMING_SNAKE_CASE : List[str] = preds_max_prob[:, 1:]
for index in range(_A ):
__SCREAMING_SNAKE_CASE : Any = preds_str[index].find(_A )
__SCREAMING_SNAKE_CASE : int = preds_str[index][:pred_eos]
__SCREAMING_SNAKE_CASE : Optional[int] = preds_index[index].cpu().tolist()
__SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(_A ) if eos_token in pred_index else -1
__SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
__SCREAMING_SNAKE_CASE : Tuple = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_A )
conf_scores.append(_A )
return dec_strs, conf_scores
def UpperCAmelCase__ ( self : List[str] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_A )]
return decode_strs
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(_A )
def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_A )]
return decode_strs
| 74 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a half adder; returns the measurement counts."""
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
_snake_case : Tuple = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
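    # Classical sanity check (a sketch): for inputs (1, 1) the XOR (sum) bit is
    # 0 and the AND (carry) bit is 1. qiskit prints classical bits with bit 1
    # (the carry) on the left, so the dominant counts key should be "10".
    assert (1 ^ 1, 1 & 1) == (0, 1)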
| 22 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCamelCase__ = threading.Lock()
UpperCamelCase__ = None
UpperCamelCase__ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
UpperCamelCase__ = logging.WARNING
UpperCamelCase__ = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices,
    return that as the default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the root logger first."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Emit the warning unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as warning(), but each distinct call is only emitted once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
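# A minimal usage sketch of the helpers above (assumes this module is
# importable as `transformers.utils.logging`, its upstream location):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("informational message")
#     logger.warning_advice("silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")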
| 75 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Convert a 32-char bit string from big endian to little endian."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-char blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Left-rotate a 32-bit integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char hex MD5 digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
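    # Cross-check against the standard library (a sketch exercising the
    # pure-Python implementation above):
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode()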
| 22 | 0 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
| 76 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
_a = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' )
image_processor_first.to_json_file(lowerCAmelCase_ )
_a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase_ )
_a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict()
_a = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase_ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 22 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
A = parser.parse_args()
A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
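# Example invocation (a sketch; the script filename is illustrative):
#
#     python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted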
| 77 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 22 | 0 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def _lowercase (self : int ):
UpperCAmelCase_ = "mock_framework"
# Framework provided - return whatever the user provides
UpperCAmelCase_ = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
UpperCAmelCase_ = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
UpperCAmelCase_ = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def _lowercase (self : Any ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
UpperCAmelCase_ = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
UpperCAmelCase_ = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
UpperCAmelCase_ = FeaturesManager.determine_framework(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ):
UpperCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase_ = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_torch_available" , __a ):
UpperCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase_ = MagicMock(return_value=__a )
UpperCAmelCase_ = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
UpperCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase_ = MagicMock(return_value=__a )
UpperCAmelCase_ = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
with self.assertRaises(__a ):
UpperCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
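# For reference, the resolution order exercised above (a sketch of expected
# behaviour, not an API guarantee): an explicitly requested framework wins,
# then the framework found in a local checkpoint, then the installed
# environment, with PyTorch preferred over TensorFlow when both are present:
#
#     from transformers.onnx import FeaturesManager
#
#     framework = FeaturesManager.determine_framework("path_or_model_id")  # "pt" or "tf"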
| 78 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor, "b c d h w -> b (c d) h w")
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
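# Round-trip sanity check for the two converters above (a sketch; values only
# agree up to the 8-bit quantisation):
#
#     x = torch.rand(1, 3, 8, 8)
#     x_rec = bits_to_decimal(decimal_to_bits(x))
#     assert torch.allclose(x_rec, (x * 255).int() / 255, atol=1e-6)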
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
):
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # route the scheduler's step through the bit-aware variants defined above,
        # bound to the scheduler instance so `scheduler.step(...)` keeps working
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, height=256, width=256, num_inference_steps=50, generator=None, batch_size=1, output_type="pil", return_dict=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn((batch_size, self.unet.config.in_channels, height, width), generator=generator)
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 22 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _lowerCamelCase ( __lowerCamelCase="" ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Any = tempfile.mkdtemp()
return os.path.join(__lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase__ : Any = AgentAudio(_lowerCAmelCase )
UpperCAmelCase__ : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowerCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_lowerCAmelCase ) )
# Ensure that the file contains the same value as the original tensor
UpperCAmelCase__ , UpperCAmelCase__ : Dict = sf.read(_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase , torch.tensor(_lowerCAmelCase ) , atol=1e-4 ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase__ : Optional[Any] = get_new_path(suffix=""".wav""" )
sf.write(_lowerCAmelCase , _lowerCAmelCase , 16000 )
UpperCAmelCase__ : int = AgentAudio(_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , _lowerCAmelCase )
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Dict = torch.randint(0 , 256 , (64, 64, 3) )
UpperCAmelCase__ : Any = AgentImage(_lowerCAmelCase )
UpperCAmelCase__ : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowerCAmelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCAmelCase ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
UpperCAmelCase__ : List[Any] = Image.open(_lowerCAmelCase )
UpperCAmelCase__ : str = AgentImage(_lowerCAmelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCAmelCase ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
UpperCAmelCase__ : Union[str, Any] = Image.open(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = AgentImage(_lowerCAmelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCAmelCase ) )
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = """Hey!"""
UpperCAmelCase__ : Optional[Any] = AgentText(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , agent_type.to_string() )
self.assertEqual(_lowerCAmelCase , agent_type.to_raw() )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 79 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Any = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
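# A minimal usage sketch (relying only on the classes restored above):
#
#     config = RoFormerConfig(vocab_size=50000, rotary_value=False)
#     config.save_pretrained("./roformer-config")  # writes config.json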
| 22 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
__snake_case :BigBirdConfig
__snake_case :jnp.dtype = jnp.floataa
__snake_case :bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
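# Intuition for the loss above (a sketch): each head is a standard one-hot
# cross-entropy, and the three head losses are averaged with equal weight.
#
#     logits = jnp.array([[10.0, -10.0]])  # almost all mass on class 0
#     labels = jnp.array([0])
#     # cross_entropy(logits, labels) would be ~0 here (it is local to the
#     # function above, so this is illustrative only)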
@dataclass
class __UpperCamelCase :
__snake_case :str = "google/bigbird-roberta-base"
__snake_case :int = 3_0_0_0
__snake_case :int = 1_0_5_0_0
__snake_case :int = 1_2_8
__snake_case :int = 3
__snake_case :int = 1
__snake_case :int = 5
# tx_args
__snake_case :float = 3e-5
__snake_case :float = 0.0
__snake_case :int = 2_0_0_0_0
__snake_case :float = 0.00_95
__snake_case :str = "bigbird-roberta-natural-questions"
__snake_case :str = "training-expt"
__snake_case :str = "data/nq-training.jsonl"
__snake_case :str = "data/nq-validation.jsonl"
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=_lowerCAmelCase )
__lowercase = os.path.join(self.base_dir , self.save_dir )
__lowercase = self.batch_size_per_device * jax.device_count()
@dataclass
class __UpperCamelCase :
__snake_case :int
__snake_case :int = 4_0_9_6 # no dynamic padding on TPUs
def __call__( self : Any , _lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase = self.collate_fn(_lowerCAmelCase )
__lowercase = jax.tree_util.tree_map(_lowerCAmelCase , _lowerCAmelCase )
return batch
def _a ( self : Dict , _lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.fetch_inputs(features["""input_ids"""] )
__lowercase = {
"""input_ids""": jnp.array(_lowerCAmelCase , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(_lowerCAmelCase , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def _a ( self : Optional[Any] , _lowerCAmelCase : list ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [self._fetch_inputs(_lowerCAmelCase ) for ids in input_ids]
return zip(*_lowerCAmelCase )
def _a ( self : int , _lowerCAmelCase : list ) -> Tuple:
"""simple docstring"""
__lowercase = [1 for _ in range(len(_lowerCAmelCase ) )]
while len(_lowerCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
if seed is not None:
__lowercase = dataset.shuffle(seed=lowerCamelCase )
for i in range(len(lowerCamelCase ) // batch_size ):
__lowercase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCamelCase )
@partial(jax.pmap , axis_name="""batch""" )
def snake_case ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
def loss_fn(lowerCamelCase ):
__lowercase = model_inputs.pop("""start_labels""" )
__lowercase = model_inputs.pop("""end_labels""" )
__lowercase = model_inputs.pop("""pooled_labels""" )
__lowercase = state.apply_fn(**lowerCamelCase , params=lowerCamelCase , dropout_rng=lowerCamelCase , train=lowerCamelCase )
__lowercase , __lowercase , __lowercase = outputs
return state.loss_fn(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
__lowercase , __lowercase = jax.random.split(lowerCamelCase )
__lowercase = jax.value_and_grad(lowerCamelCase )
__lowercase , __lowercase = grad_fn(state.params )
__lowercase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
__lowercase = jax.lax.pmean(lowerCamelCase , """batch""" )
__lowercase = state.apply_gradients(grads=lowerCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def snake_case ( lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
__lowercase = model_inputs.pop("""start_labels""" )
__lowercase = model_inputs.pop("""end_labels""" )
__lowercase = model_inputs.pop("""pooled_labels""" )
__lowercase = state.apply_fn(**lowerCamelCase , params=state.params , train=lowerCamelCase )
__lowercase , __lowercase , __lowercase = outputs
__lowercase = state.loss_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class __UpperCamelCase ( train_state.TrainState ):
__snake_case :Callable = struct.field(pytree_node=_lowerCAmelCase )
@dataclass
class __UpperCamelCase :
__snake_case :Args
__snake_case :Callable
__snake_case :Callable
__snake_case :Callable
__snake_case :Callable
__snake_case :wandb
__snake_case :Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            # the task loss (assumed here to be defined earlier in the original
            # script and elided from this excerpt)
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # weight decay is applied to everything except biases and LayerNorm
        # parameters, identified by the last components of the flattened key path
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
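

def _demo_build_tx_schedule():
    # A minimal sketch added for illustration (not part of the original script):
    # the schedule returned by `build_tx` is a linear warmup joined to a linear
    # decay at `warmup_steps`. All numbers below are made up.
    tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.01)
    # `lr` is a callable schedule: it ramps 0.0 -> 3e-5 over the first 100 steps,
    # then decays towards 1e-7 over the remaining 900 steps.
    return [float(lr(step)) for step in (0, 50, 100, 500, 999)]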
| 80 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: shortest distance from start_vertex to finish_vertex."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
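
    # A small usage sketch added for illustration (uses the class names restored
    # above): 0-weight edges go to the front of the deque and 1-weight edges to
    # the back, so vertices are settled in non-decreasing distance order.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 0)
    assert g.get_shortest_path(0, 3) == 1  # 0 ->(0) 1 ->(1) 2 ->(0) 3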
| 22 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # not needed for the checkpoint conversion
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
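

def _demo_state_dict_transfer():
    # A minimal sketch added for illustration (not part of the original script):
    # the conversion above works because `load_state_dict` copies tensors between
    # modules whose parameter names and shapes match. Layer sizes are made up.
    src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
    dst.load_state_dict(src.state_dict())
    return torch.equal(src.weight, dst.weight)  # True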
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 81 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Calculate the gamma function value for an integer or half-integer num."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
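

def _demo_half_integer_gamma():
    # A worked example added for illustration (not part of the original module):
    # the recurrence gamma(n) = (n - 1) * gamma(n - 1) unrolls gamma(3.5) to
    # 2.5 * 1.5 * 0.5 * sqrt(pi), and gamma(5) to 4! = 24.
    from math import isclose

    assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))
    assert gamma(5) == 24.0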
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
| 22 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
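

def _demo_parent_directory_entries():
    # A minimal sketch added for illustration (not part of the original module):
    # `_get_dirs` derives directory entries from each file path via
    # `PurePosixPath.parents`, dropping the final "." entry with [:-1].
    parents = list(PurePosixPath("data/train/part-0.parquet").parents)[:-1]
    return [str(d) for d in parents]  # ["data/train", "data"]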
| 82 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
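

def _demo_slice_comparison():
    # A minimal sketch added for illustration (not part of the original tests):
    # the assertions above compare a small corner slice of the generated image to
    # hard-coded reference values using a max-abs-difference tolerance.
    image = np.zeros((1, 512, 512, 3))
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.zeros(9)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2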
| 22 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''', [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''''')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''') as f:
            f.write('''{"default": {"dataset_size": 42}}''')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''', [
DatasetInfo(),
DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json'''))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''', [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md'''))
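

def _demo_yaml_round_trip():
    # A minimal sketch added for illustration (not part of the original tests):
    # the YAML round-trip checked above boils down to safe_dump/safe_load being
    # inverses on plain dicts of lists, ints, and strings.
    info_dict = {"dataset_size": 42, "splits": [{"name": "train"}]}
    return yaml.safe_load(yaml.safe_dump(info_dict)) == info_dict  # True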
| 83 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
        return re.sub(regex, ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('''Sources length must match predictions and references lengths.''')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False, ):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'''exact''': compute_em(predictions=predictions, references=references)})
        return result
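

def _demo_counter_intersection():
    # A minimal sketch added for illustration (not part of the original metric):
    # SARI's KEEP score relies on Counter intersection (&), which keeps the
    # element-wise minimum of two multisets.
    source = Counter("the cat sat".split())
    prediction = Counter("the cat ran".split())
    return source & prediction  # Counter({"the": 1, "cat": 1})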
| 22 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.', FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
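

def _demo_smp_options_check():
    # A minimal sketch added for illustration (not part of the original module):
    # the availability check above just parses a JSON blob from an environment
    # variable and looks for a "partitions" key. The values here are made up.
    smp_options = json.loads('{"partitions": 2, "microbatches": 4}')
    return "partitions" in smp_options  # True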
| 84 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
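

def _demo_numpy_to_pil():
    # A minimal sketch added for illustration (not part of the original module):
    # a batch of float images in [0, 1] becomes a list of uint8 PIL images.
    import numpy as np

    images = np.random.rand(2, 8, 8, 3)
    pils = numpy_to_pil(images)
    return [im.size for im in pils]  # [(8, 8), (8, 8)]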
| 22 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
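

def _demo_model_input_names_merge():
    # A minimal sketch added for illustration (not part of the original
    # processor): the `model_input_names` property above de-duplicates while
    # preserving order via dict.fromkeys.
    tokenizer_names = ["input_ids", "attention_mask"]
    image_names = ["pixel_values", "attention_mask"]
    return list(dict.fromkeys(tokenizer_names + image_names))
    # ["input_ids", "attention_mask", "pixel_values"]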
| 85 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message)
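

def _demo_error_branch():
    # A minimal sketch added for illustration (not part of the original script):
    # any non-200 status code raises a ValueError carrying the status code and
    # response body. A stub response object stands in for a real request here.
    class FakeResponse:
        status_code = 404
        text = "channel_not_found"

    response = FakeResponse()
    if response.status_code != 200:
        return f"Request to slack returned an error {response.status_code}, the response is:\n{response.text}"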
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
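

def _demo_dotted_attribute_traversal():
    # A minimal sketch added for illustration (not part of the original script):
    # `set_recursively` walks a dotted key with getattr, exactly like this.
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4))
    pointer = model
    for attribute in "0.weight".split("."):
        pointer = getattr(pointer, attribute)
    return pointer.shape  # torch.Size([4, 4])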
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
 | 86 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 2_55, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 2_56}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
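

def _demo_segmentation_argmax():
    # A minimal sketch added for illustration (not part of the original
    # processor): resizing logits to the target size and taking an argmax over
    # the class dimension yields a per-pixel label map. Shapes are made up.
    logits = torch.randn(1, 21, 32, 32)  # (batch, num_classes, height, width)
    resized = torch.nn.functional.interpolate(logits, size=(64, 64), mode="bilinear", align_corners=False)
    return resized[0].argmax(dim=0).shape  # torch.Size([64, 64])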
| 22 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
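
# Added for illustration (not part of the original module): the `_LazyModule`
# swap above defers the heavy torch imports until an attribute is first
# accessed, similar in spirit to a PEP 562 module-level __getattr__ sketch:
#
#     def __getattr__(name):
#         if name == "MCTCTModel":
#             from .modeling_mctct import MCTCTModel
#             return MCTCTModel
#         raise AttributeError(name)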
| 87 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds (train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    '''Load CSV files into train/validation/test `tf.data.Dataset`s plus a label2id mapping.'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    label_column_id: int = field(metadata={'help': 'Which column contains the label'} )
    train_file: Optional[str] = field(default=None ,metadata={'help': 'The path of the training file'} )
    dev_file: Optional[str] = field(default=None ,metadata={'help': 'The path of the development file'} )
    test_file: Optional[str] = field(default=None ,metadata={'help': 'The path of the test file'} )
    max_seq_length: int = field(
        default=128 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast: bool = field(default=False ,metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def main ():
    '''Fine-tune a Transformers model for sequence classification with TensorFlow.'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        f'16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f' {key} = {value}' )
                writer.write(f'{key} = {value}\n' )
        results.update(result )
    return results
if __name__ == "__main__":
main()
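# Standalone sketch (assumes tensorflow; names are illustrative) of the `from_generator`
# pattern used in get_tfds above: features are a dict of variable-length int32 tensors
# and labels are scalar ints, described by matching output_types/output_shapes.
import tensorflow as tf

def _toy_gen():
    yield ({"input_ids": [101, 2023, 102]}, 1)
    yield ({"input_ids": [101, 2003, 3793, 102]}, 0)

_toy_ds = tf.data.Dataset.from_generator(
    _toy_gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)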
| 22 | 0 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
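# For reference, a minimal sketch (an assumption, not the real `deprecate` helper) of
# what a deprecation shim like the call above boils down to: emit a FutureWarning that
# points `stacklevel` frames up, at the importing caller.
import warnings

def _warn_deprecated(name, removed_in, message, stacklevel=3):
    warnings.warn(f"`{name}` is deprecated and will be removed in {removed_in}. {message}",
                  FutureWarning, stacklevel=stacklevel)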
| 88 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp ( self ) -> None:
        """simple docstring"""
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer ( self , **kwargs ) -> LEDTokenizer:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer ( self , **kwargs ) -> LEDTokenizerFast:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ) -> Tuple[str, str]:
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer ( self ) -> LEDTokenizer:
        """simple docstring"""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
    @cached_property
    def default_tokenizer_fast ( self ) -> LEDTokenizerFast:
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ['''Summary of the text.''', '''Another summary.''']
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']]
_a = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
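# Self-contained sketch of the convention the global-attention test above checks:
# `global_attention_mask` is padded with -1, because 0 already means "local attention"
# for LED (helper name here is illustrative).
def _pad_global_attention_mask(mask, target_len):
    return mask + [-1] * (target_len - len(mask))

assert _pad_global_attention_mask([0, 0, 1], 5) == [0, 0, 1, -1, -1]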
| 22 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling (data ) -> tuple:
    # Split a scikit-learn dataset bunch into (features, target)
    return (data["data"], data["target"])
def xgboost (features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main () -> None:
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
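# A small follow-on sketch: RMSE is often reported next to MAE/MSE; it can be derived
# from the same arrays with numpy alone (function name here is illustrative).
import numpy as np

def root_mean_squared_error(y_true, y_pred) -> float:
    y_true, y_pred = np.asarray(y_true).ravel(), np.asarray(y_pred).ravel()
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))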
| 89 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list (split_dict : SplitDict ):
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name (split_info ):
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
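# Generic sketch of the serialize/round-trip property exercised above, using a plain
# dataclass instead of datasets' SplitInfo (names here are illustrative).
from dataclasses import asdict as _asdict, dataclass as _dataclass

@_dataclass
class _Info:
    name: str = ""
    num_examples: int = 0

_original = {"train": _Info("train", 42)}
_reloaded = {k: _Info(**_asdict(v)) for k, v in _original.items()}
assert _reloaded == _original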
| 22 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 90 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A ( unittest.TestCase ):
    def setUp ( self ) -> None:
        """simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
    def tearDown ( self ) -> None:
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency ( self , comment : str , class_name : str , class_code : str , overwrite_result : str=None ) -> None:
        """simple docstring"""
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers ( self ) -> None:
        """simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency ( self ) -> None:
        """simple docstring"""
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''DDPM''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
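# Minimal illustration of the "# Copied from ... with A->B" marker grammar the tests
# above exercise: the `with` suffix encodes a rename applied to the copied code.
import re as _re

_marker = "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test"
_match = _re.search(r"with\s+(\w+)->(\w+)", _marker)
assert _match is not None and _match.groups() == ("DDPM", "Test")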
| 22 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance ( resistance : float , reactance : float , impedance : float ):
    """Solve for the zeroed quantity in Z^2 = R^2 + X^2, given the other two."""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
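# Spot checks for the solver above, built on the 3-4-5 right triangle (Z^2 = R^2 + X^2):
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}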
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 91 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
_snake_case : Union[str, Any] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
_snake_case : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """Frame one sequence, or a pair, with [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """Build segment ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
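# Self-contained illustration of the segment-id rule implemented above: zeros cover
# [CLS] + first sequence + [SEP]; ones cover the second sequence + [SEP]
# (token ids 101/102/7/8/9 are arbitrary placeholders).
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7, 8], [9]
assert len(_cls + _ids_a + _sep) * [0] == [0, 0, 0, 0]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]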
| 22 | 0 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase_ = {
"""allenai/led-base-16384""": 16384,
}
class LEDTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token ( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token ( self , value ):
        '''Make the mask token strip left whitespace so it behaves like a normal word.'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus ( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad ( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
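# Tiny standalone check of the left/right padding arithmetic in _pad above: the
# difference is filled with -1 on whichever side `padding_side` selects.
_mask, _required_len = [0, 1, 0], 5
_difference = _required_len - len(_mask)
assert _mask + [-1] * _difference == [0, 1, 0, -1, -1]      # padding_side == "right"
assert [-1] * _difference + _mask == [-1, -1, 0, 1, 0]      # padding_side == "left"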
| 92 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , crop_size : Dict[str, int] = None , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        """Store the default resize/crop/rescale/normalize configuration."""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize ( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Resize to a shortest-edge or explicit (height, width) target."""
        size = get_size_dict(size )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop ( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Center-crop to size['height'] x size['width']."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale ( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize ( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess ( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch and return pixel_values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
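# Minimal numpy-only sketch of the transformation order applied by preprocess above:
# rescale to [0, 1], normalize per channel, then move channels first (the dummy image
# and the reuse of the module-level IMAGENET constants are for illustration).
import numpy as np

_dummy = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
_rescaled = _dummy * (1 / 255)
_normalized = (_rescaled - np.array(IMAGENET_DEFAULT_MEAN)) / np.array(IMAGENET_DEFAULT_STD)
_channels_first = _normalized.transpose(2, 0, 1)  # matches ChannelDimension.FIRST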
| 22 | 0 |
"""simple docstring"""
def decimal_to_binary (num ) ->str:
    """Convert an integer to a binary string; the sign is kept as a '-' prefix."""
    if isinstance(num , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(num , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=5_0257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.attention_layers)` == `config.num_layers` '''
                f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '''`config.attention_layers` is prepared using `config.attention_types`. '''
                '''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params ( attention_types ) -> list:
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold (input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks (seq_length , window_size ):
    """Custom implementation of the block-length helper to enable the export to ONNX."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
class GPTNeoOnnxConfig ( OnnxConfigWithPast ):
    """simple docstring"""
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_attention_heads ( self ) -> int:
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs ( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset ( self ) -> int:
        '''simple docstring'''
        return 13
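# Shape sketch (assumes torch; the 2/16/10/128/24 values are illustrative) for the dummy
# past_key_values built above: one (key, value) pair per layer, each tensor of shape
# (batch, num_heads, past_seq_len, head_dim).
import torch

_batch, _num_heads, _past_len, _head_dim = 2, 16, 10, 128
_past = [
    (torch.zeros(_batch, _num_heads, _past_len, _head_dim),
     torch.zeros(_batch, _num_heads, _past_len, _head_dim))
    for _ in range(24)
]
assert len(_past) == 24 and _past[0][0].shape == (_batch, _num_heads, _past_len, _head_dim)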
| 94 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config ( self , **kwargs ) -> Dict:
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps ( self ) -> None:
        """simple docstring"""
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas ( self ) -> None:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules ( self ) -> None:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type ( self ) -> None:
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample ( self ) -> None:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding ( self ) -> None:
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type ( self ) -> None:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices ( self ) -> None:
        """simple docstring"""
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=t )
    def test_variance ( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
        assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_00, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
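A quick numeric sketch of the "fixed_small" variance those assertions probe, assuming the linear beta schedule from the config above (names here are illustrative, not the scheduler's internals):
import torch

# Posterior variance: sigma_t^2 = (1 - abar_{t-1}) / (1 - abar_t) * beta_t
betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> float:
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return ((1 - alpha_prod_t_prev) / (1 - alphas_cumprod[t]) * betas[t]).item()

print(fixed_small_variance(0), fixed_small_variance(487), fixed_small_variance(999))
# roughly 0.0, 0.00979 and 0.02, matching the assertions above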
| 22 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCamelCase_ = logging.get_logger(__name__)
class VisionEncoderDecoderConfig (PretrainedConfig ):
    model_type = '''vision-encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class VisionEncoderDecoderDecoderOnnxConfig (OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids" )
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask" )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig (OnnxConfig ):
@property
    def inputs( self ) -> None:
        pass
    def get_encoder_config( self , encoder_config : PretrainedConfig ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , feature : str = "default" ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
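A hedged usage sketch of the classmethod above: transformers exposes it as VisionEncoderDecoderConfig.from_encoder_decoder_configs, combining any encoder config with any decoder config while flipping the decoder flags.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

# Build a composite config from a ViT encoder and a BERT decoder.
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True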
| 95 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling (data : dict ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost (features : np.ndarray , target : np.ndarray , test_features : np.ndarray ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ():
    '''simple docstring'''
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test , predictions )}' )
    print(f'Mean Square Error : {mean_squared_error(y_test , predictions )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 22 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
__lowerCamelCase = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__lowerCamelCase = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__lowerCamelCase = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 96 |
'''simple docstring'''
import qiskit
def half_adder (bita : int , bitb : int ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value
    qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
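For reference, a classical sketch of the truth table the circuit implements; the simulator's dominant count should be the bitstring "carry sum" for the given inputs.
# sum is the XOR of the inputs, carry is the AND
for a in (0, 1):
    for b in (0, 1):
        print(f"{a} + {b} -> sum={a ^ b}, carry={a & b}")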
| 22 | 0 |
import mpmath # for roots of unity
import numpy as np
class lowercase__:
"""simple docstring"""
    def __init__( self , poly_a=None , poly_b=None ):
        # Input as list
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft( self , which ):
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply( self ):
        dft_a = self.__dft('''A''' )
        dft_b = self.__dft('''B''' )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self ):
        a = '''A = ''' + ''' + '''.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = '''B = ''' + ''' + '''.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = '''A*B = ''' + ''' + '''.join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
        return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
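A quick cross-check sketch: the FFT-based product should agree with a direct coefficient convolution.
import numpy as np

poly_a = [1, 2, 3]  # 1 + 2x + 3x^2
poly_b = [4, 5]     # 4 + 5x
print(np.convolve(poly_a, poly_b))  # [ 4 13 22 15] -> 4 + 13x + 22x^2 + 15x^3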
| 97 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ):
    '''simple docstring'''
    if len(string_aa ) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = B''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i , '''08x''' )[-8:]
    little_endian_hex = B''''''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def preprocess (message : bytes ):
    '''simple docstring'''
    bit_string = B''''''
    for char in message:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words (bit_string : bytes ):
    '''simple docstring'''
    if len(bit_string ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa (i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i , '''032b''' )
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ):
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_aa (i : int , shift : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me (message : bytes ):
    '''simple docstring'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
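A sanity-check sketch: any MD5 implementation should reproduce the standard library's digest for the classic test vector.
import hashlib

print(hashlib.md5(b"The quick brown fox jumps over the lazy dog").hexdigest())
# 9e107d9d372bb6826bd81d3542a419d6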
| 22 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def load_tfa_weights_in_bert ( model, tf_checkpoint_path, config ):
"""simple docstring"""
_UpperCamelCase = os.path.abspath(lowercase )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
_UpperCamelCase = tf.train.list_variables(lowercase )
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_UpperCamelCase = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
_UpperCamelCase = name[1:]
# figure out how many levels deep the name is
_UpperCamelCase = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(lowercase )
# read data
_UpperCamelCase = tf.train.load_variable(lowercase, lowercase )
names.append('''/'''.join(lowercase ) )
arrays.append(lowercase )
logger.info(F"""Read a total of {len(lowercase ):,} layers""" )
# Sanity check
if len(set(lowercase ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(lowercase ) )})""" )
_UpperCamelCase = list(set(lowercase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(lowercase, lowercase ):
_UpperCamelCase = full_name.split('''/''' )
_UpperCamelCase = model
_UpperCamelCase = []
for i, m_name in enumerate(lowercase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
_UpperCamelCase = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
_UpperCamelCase = getattr(lowercase, '''embeddings''' )
_UpperCamelCase = getattr(lowercase, '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
_UpperCamelCase = getattr(lowercase, '''encoder''' )
_UpperCamelCase = getattr(lowercase, '''layer''' )
_UpperCamelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
_UpperCamelCase = getattr(lowercase, '''pooler''' )
_UpperCamelCase = getattr(lowercase, '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
_UpperCamelCase = getattr(lowercase, '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
_UpperCamelCase = getattr(lowercase, '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
_UpperCamelCase = getattr(lowercase, '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
_UpperCamelCase = getattr(lowercase, '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
_UpperCamelCase = getattr(lowercase, '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
_UpperCamelCase = getattr(lowercase, '''attention''' )
_UpperCamelCase = getattr(lowercase, '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
_UpperCamelCase = getattr(lowercase, '''attention''' )
_UpperCamelCase = getattr(lowercase, '''output''' )
_UpperCamelCase = getattr(lowercase, '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
_UpperCamelCase = getattr(lowercase, '''attention''' )
_UpperCamelCase = getattr(lowercase, '''output''' )
_UpperCamelCase = getattr(lowercase, '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
_UpperCamelCase = getattr(lowercase, '''output''' )
_UpperCamelCase = getattr(lowercase, '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
_UpperCamelCase = getattr(lowercase, '''output''' )
_UpperCamelCase = getattr(lowercase, '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
_UpperCamelCase = getattr(lowercase, '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
_UpperCamelCase = getattr(lowercase, '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
_UpperCamelCase = getattr(lowercase, '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
_UpperCamelCase = getattr(lowercase, '''intermediate''' )
_UpperCamelCase = getattr(lowercase, '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
_UpperCamelCase = getattr(lowercase, '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
_UpperCamelCase = getattr(lowercase, '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
_UpperCamelCase = getattr(lowercase, '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
_UpperCamelCase = '''.'''.join(lowercase )
if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', lowercase ) or re.match(
r'''(\S+)\.attention\.output\.dense\.weight''', lowercase ):
_UpperCamelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_UpperCamelCase = array.transpose()
if pointer.shape == array.shape:
_UpperCamelCase = torch.from_numpy(lowercase )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def convert_tfa_checkpoint_to_pytorch ( tf_checkpoint_path, config_path, pytorch_dump_path ):
    """simple docstring"""
    logger.info(F"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config )
    # Save pytorch-model
    logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
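A usage sketch calling the conversion function directly, equivalent to the CLI invocation via the argparse flags above (all paths are hypothetical):
# Hypothetical paths; mirrors --tf_checkpoint_path / --bert_config_file / --pytorch_dump_path.
convert_tfa_checkpoint_to_pytorch(
    tf_checkpoint_path="./tf2_checkpoint/",
    config_path="./bert_config.json",
    pytorch_dump_path="./pytorch_model.bin",
)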
| 98 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , ):
        """simple docstring"""
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''clusters''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''image_processor.json''' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def prepare_images ():
    '''simple docstring'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image1 = Image.open(dataset[4]['''file'''] )
    image2 = Image.open(dataset[5]['''file'''] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        expected_ids = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        expected_ids = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
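A small sketch of the nearest-cluster quantization behind the `clusters` parameter above: each normalized pixel is mapped to the index of its closest color cluster (toy values, not the real palette).
import numpy as np

clusters = np.asarray([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.54]])  # 2 toy clusters
pixels = np.asarray([[0.8, 0.6, 0.5], [-0.5, 0.0, 0.5]])       # 2 pixels in [-1, 1]
distances = np.linalg.norm(pixels[:, None, :] - clusters[None, :, :], axis=-1)
print(distances.argmin(axis=1))  # [0 1] -> token ids fed to ImageGPT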
| 22 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = AutoencoderKL
_lowerCamelCase = """sample"""
_lowerCamelCase = 1e-2
@property
def snake_case_ ( self ):
__a = 4
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(__A )
return {"sample": image}
@property
def snake_case_ ( self ):
return (3, 32, 32)
@property
def snake_case_ ( self ):
return (3, 32, 32)
def snake_case_ ( self ):
__a = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
__a = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def snake_case_ ( self ):
# enable deterministic behavior for gradient checkpointing
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**__A )
model.to(__A )
assert not model.is_gradient_checkpointing and model.training
__a = model(**__A ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__a = torch.randn_like(__A )
__a = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__a = self.model_class(**__A )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__A )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__a = model_a(**__A ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__a = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__a = dict(model.named_parameters() )
__a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def snake_case_ ( self ):
__a , __a = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__A )
self.assertIsNotNone(__A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__A )
__a = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case_ ( self ):
__a = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
__a = model.to(__A )
model.eval()
if torch_device == "mps":
__a = torch.manual_seed(0 )
else:
__a = torch.Generator(device=__A ).manual_seed(0 )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = image.to(__A )
with torch.no_grad():
__a = model(__A , sample_posterior=__A , generator=__A ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__a = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__a = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__a = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__A , __A , rtol=1E-2 ) )
@slow
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , __A , __A ):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(__A ) for s in shape] )}.npy'''
def snake_case_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , __A=0 , __A=(4, 3, 512, 512) , __A=False ):
__a = torch.floataa if fpaa else torch.floataa
__a = torch.from_numpy(load_hf_numpy(self.get_file_format(__A , __A ) ) ).to(__A ).to(__A )
return image
def snake_case_ ( self , __A="CompVis/stable-diffusion-v1-4" , __A=False ):
__a = """fp16""" if fpaa else None
__a = torch.floataa if fpaa else torch.floataa
__a = AutoencoderKL.from_pretrained(
__A , subfolder="""vae""" , torch_dtype=__A , revision=__A , )
model.to(__A ).eval()
return model
def snake_case_ ( self , __A=0 ):
if torch_device == "mps":
return torch.manual_seed(__A )
return torch.Generator(device=__A ).manual_seed(__A )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def snake_case_ ( self , __A , __A , __A ):
__a = self.get_sd_vae_model()
__a = self.get_sd_image(__A )
__a = self.get_generator(__A )
with torch.no_grad():
__a = model(__A , generator=__A , sample_posterior=__A ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__A , __A , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def snake_case_ ( self , __A , __A ):
__a = self.get_sd_vae_model(fpaa=__A )
__a = self.get_sd_image(__A , fpaa=__A )
__a = self.get_generator(__A )
with torch.no_grad():
__a = model(__A , generator=__A , sample_posterior=__A ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(__A )
assert torch_all_close(__A , __A , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def snake_case_ ( self , __A , __A , __A ):
__a = self.get_sd_vae_model()
__a = self.get_sd_image(__A )
with torch.no_grad():
__a = model(__A ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__A , __A , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def snake_case_ ( self , __A , __A ):
__a = self.get_sd_vae_model()
__a = self.get_sd_image(__A , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(__A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().cpu()
__a = torch.tensor(__A )
assert torch_all_close(__A , __A , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def snake_case_ ( self , __A , __A ):
__a = self.get_sd_vae_model(fpaa=__A )
__a = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A )
with torch.no_grad():
__a = model.decode(__A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(__A )
assert torch_all_close(__A , __A , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def snake_case_ ( self , __A ):
__a = self.get_sd_vae_model(fpaa=__A )
__a = self.get_sd_image(__A , shape=(3, 4, 64, 64) , fpaa=__A )
with torch.no_grad():
__a = model.decode(__A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(__A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__A , __A , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def snake_case_ ( self , __A ):
__a = self.get_sd_vae_model()
__a = self.get_sd_image(__A , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(__A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(__A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__A , __A , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def snake_case_ ( self , __A , __A ):
__a = self.get_sd_vae_model()
__a = self.get_sd_image(__A )
__a = self.get_generator(__A )
with torch.no_grad():
__a = model.encode(__A ).latent_dist
__a = dist.sample(generator=__A )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__a = sample[0, -1, -3:, -3:].flatten().cpu()
__a = torch.tensor(__A )
__a = 3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(__A , __A , atol=__A )
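A minimal encode/decode roundtrip sketch, assuming the small test checkpoint used above is reachable:
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(x).latent_dist.sample()
    recon = vae.decode(latents).sample
print(latents.shape, recon.shape)  # latents are downsampled, recon matches x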
| 99 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
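An illustrative sketch of the lazy-import pattern above (not transformers' actual _LazyModule): attributes resolve to their defining submodules only on first access, keeping the package import cheap.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the defining submodule lazily, then fetch the attribute
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)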
| 100 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , '''b c d h w -> b (c d) h w''' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal (x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else '''cpu'''
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step (self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str="epsilon" , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class A ( DiffusionPipeline ):
    def __init__( self , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ):
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , height : Optional[int] = 2_56 , width : Optional[int] = 2_56 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
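A worked example of the bit encoding used by the pipeline, assuming 8 bits per channel: a pixel value of 0.5 quantizes to 127, whose binary expansion is 01111111.
import torch

x = torch.tensor([[[[0.5]]]])                    # (b, c, h, w) = (1, 1, 1, 1)
q = (x * 255).int().clamp(0, 255)                # -> 127
mask = 2 ** torch.arange(7, -1, -1)              # [128, 64, 32, 16, 8, 4, 2, 1]
bits = ((q & mask.view(-1, 1, 1)) != 0).float()  # shape (1, 8, 1, 1)
print(bits.flatten().tolist())                   # [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]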
| 22 | 0 |